diff --git a/unwarp_rectify/CameraTrax_24ColorCard_2x3in.py b/unwarp_rectify/CameraTrax_24ColorCard_2x3in.py new file mode 100644 index 0000000..ab6bcd2 --- /dev/null +++ b/unwarp_rectify/CameraTrax_24ColorCard_2x3in.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +""" +Created on Wed May 21 14:09:56 2014 + +@author: chuong +""" +from __future__ import absolute_import, division, print_function + + +import cv2 +import numpy as np +from matplotlib import pylab as plt + +# RED GRN BLU NAME +CameraTrax_24ColorCard = [ [115, 83, 68],\ + [196,147,127],\ + [ 91,122,155],\ + [ 94,108, 66],\ + [129,128,176],\ + [ 98,190,168],\ + [223,124, 47],\ + [ 72, 92,174],\ + [194, 82, 96],\ + [ 93, 60,103],\ + [162,190, 62],\ + [229,158, 41],\ + [ 49, 66,147],\ + [ 77,153, 71],\ + [173, 57, 60],\ + [241,201, 25],\ + [190, 85,150],\ + [ 0,135,166],\ + [242,243,245],\ + [203,203,204],\ + [162,163,162],\ + [120,120,120],\ + [ 84, 84, 84],\ + [ 50, 50, 52]] +ColourNames = ['DrkTone', 'LtTone', 'SkyBlue', 'Tree-Grn', 'LtBlu', 'Blu-Grn', \ + 'Orange', 'MedBlu', 'LtRed', 'Purple', 'Yel-Grn', 'Org-Grn', \ + 'Blue', 'Green', 'Red', 'Yellow', 'Magenta', 'Cyan', \ + 'White', 'LtGrey', 'Grey', 'DrkGrey', 'Charcoal', 'Black'] +SquareSize = 50 # pixels +P24ColorCard = np.zeros([SquareSize*4, SquareSize*6, 3], dtype = np.uint8) +for i,Colour in enumerate(CameraTrax_24ColorCard): + R,G,B = Colour + Row = int(i/6) + Col = i - Row*6 + P24ColorCard[Row*SquareSize:(Row+1)*SquareSize, Col*SquareSize:(Col+1)*SquareSize, 0] = R + P24ColorCard[Row*SquareSize:(Row+1)*SquareSize, Col*SquareSize:(Col+1)*SquareSize, 1] = G + P24ColorCard[Row*SquareSize:(Row+1)*SquareSize, Col*SquareSize:(Col+1)*SquareSize, 2] = B + +P24ColorCard_BGR = P24ColorCard[:,:,::-1] +cv2.imwrite('CameraTrax_24ColorCard_2x3in.png', P24ColorCard_BGR) +P24ColorCard_BGR2 = cv2.imread('CameraTrax_24ColorCard_2x3in.png') +if (P24ColorCard_BGR2 != P24ColorCard_BGR).any(): + print('Output image is not the same as the actual image') + 
# ---------------------------------------------------------------------------
# correctDistortionAndColor.py
# ---------------------------------------------------------------------------
"""
Correct lens distortion and colour of a time-stream of images, then
locate the colour card, trays and pots in each frame.

Created on Tue May 13 16:17:53 2014

@author: chuong
"""

import numpy as np
from matplotlib import pyplot as plt
import getopt
import sys
import os
import glob
import json
import time
import cv2
from scipy import optimize
from timestream.parse import ts_iter_images
from multiprocessing import Pool
import utils

# Debug/display flag kept for compatibility with older revisions.
isShow = False


def correctDistortionAndColor(Arg):
    """Undistort, rotate and colour-correct one image, then locate the
    colour card, trays and pots in it.

    *Arg* is a single tuple so the function can be used with Pool.map:
        ImageFile_             -- path of the input image
        RotationAngle          -- coarse rotation applied before matching
        UndistMapX, UndistMapY -- remap tables from
                                  cv2.initUndistortRectifyMap, or None
                                  to skip undistortion
        P24ColorCardCaptured_PyramidImages -- pyramid of the captured card
        colorcardPosition      -- estimated [x, y] of the card centre
        Colors                 -- 3x24 reference R,G,B card values
        Tray_PyramidImagesList -- list of tray template pyramids
        trayPositions          -- estimated [x, y] centre of each tray
        Pot_PyramidImages      -- pot template pyramid
        OutputFile             -- where to write the corrected image

    Returns (OutputFile, meanIntensity, ColorCardScore, ColorCardLoc,
    ColorCorrectionError, TrayLocs), with None entries for stages that
    could not be completed, or None when the file cannot be read.
    """
    (ImageFile_, RotationAngle, UndistMapX, UndistMapY,
     P24ColorCardCaptured_PyramidImages, colorcardPosition, Colors,
     Tray_PyramidImagesList, trayPositions, Pot_PyramidImages,
     OutputFile) = Arg

    print(ImageFile_)
    Image = cv2.imread(ImageFile_)
    # cv2.imread signals failure by returning None (no exception); test
    # with `is None` BEFORE indexing -- `Image == None` on a numpy array
    # is elementwise and the old slice-then-check crashed on bad files.
    if Image is None:
        print('Cannot read file')
        return
    Image = Image[:, :, ::-1]  # B-G-R -> R-G-B

    Image = utils.rotateImage(Image, RotationAngle)
    if UndistMapX is not None:
        Image = cv2.remap(Image.astype(np.uint8), UndistMapX, UndistMapY,
                          cv2.INTER_CUBIC)

    # Fall-back values used when the detections below fail.
    ImageCorrected = Image
    ColorCardScore = None
    ColorCardLoc = None
    ColorCorrectionError = None
    TrayLocs = None

    # float() keeps the value JSON-serialisable (np.float64 is not).
    meanIntensity = float(np.mean(Image.astype(np.float64)))
    if meanIntensity < 10:
        # Night or grossly under-exposed frame: nothing can be matched.
        # (The old code popped up a blocking matplotlib window here,
        # which hangs a multiprocessing worker -- removed.)
        print('meanIntensity = ', meanIntensity)
        print('Image is too dark to process')
        # Same tuple layout as the success path for uniform downstream
        # handling (the old code returned a differently ordered tuple).
        return (None, meanIntensity, ColorCardScore, ColorCardLoc,
                ColorCorrectionError, TrayLocs)

    PyramidImages = utils.createImagePyramid(Image)
    cardH, cardW = P24ColorCardCaptured_PyramidImages[0].shape[:2]
    SearchRange = [cardW // 2, cardH // 2]
    ColorCardScore, ColorCardLoc, ColorCardAngle = utils.matchTemplatePyramid(
        PyramidImages, P24ColorCardCaptured_PyramidImages, 0,
        EstimatedLocation=colorcardPosition, SearchRange=SearchRange)
    if ColorCardScore > 0.3:
        Image = utils.rotateImage(Image, ColorCardAngle)
        ColorCardCaptured = Image[
            ColorCardLoc[1] - cardH // 2:ColorCardLoc[1] + cardH // 2,
            ColorCardLoc[0] - cardW // 2:ColorCardLoc[0] + cardW // 2]

        # Sample a 20x20 window at the centre of each of the 24 squares:
        # mean colour for the fit, std-dev as a quality indicator.
        Captured_Colors = np.zeros([3, 24])
        STD_Colors = np.zeros([24])
        SquareSize2 = ColorCardCaptured.shape[0] // 4
        HalfSquareSize2 = SquareSize2 // 2
        for i in range(24):
            Row, Col = divmod(i, 6)
            rr = Row * SquareSize2 + HalfSquareSize2
            cc = Col * SquareSize2 + HalfSquareSize2
            Patch = ColorCardCaptured[rr - 10:rr + 10,
                                      cc - 10:cc + 10, :].astype(np.float64)
            STD_Colors[i] = np.sum(np.std(Patch, axis=(0, 1)))
            Captured_Colors[:, i] = np.mean(Patch, axis=(0, 1))

        # Colour model: 3x3 mixing matrix + per-channel offset + gamma,
        # refined by least squares against the reference colours.
        Arg2 = np.concatenate([np.eye(3).reshape(9),  # identity matrix
                               np.zeros(3),           # zero offsets
                               np.ones(3)])           # unit gammas
        ArgRefined, _ = optimize.leastsq(
            utils.getColorMatchingErrorVectorised, Arg2,
            args=(Colors, Captured_Colors), maxfev=10000)

        errorList = utils.getColorMatchingErrorVectorised(
            ArgRefined, Colors, Captured_Colors)
        ColorCorrectionError = float(np.sum(np.asarray(errorList)))

        ColorMatrix = ArgRefined[:9].reshape([3, 3])
        ColorConstant = ArgRefined[9:12].reshape([3, 1])
        ColorGamma = ArgRefined[12:15]

        ImageCorrected = utils.correctColorVectorised(
            Image.astype(np.float64), ColorMatrix, ColorConstant, ColorGamma)
        # Clip to the valid 8-bit range in place.
        np.clip(ImageCorrected, 0, 255, out=ImageCorrected)
    else:
        print('Skip color correction of', ImageFile_)

    # Suppress the green channel so green plants don't confuse the
    # tray/pot template matching.
    ImageCorrected_NoGreen = np.zeros_like(ImageCorrected)
    ImageCorrected_NoGreen[:, :, 0] = ImageCorrected[:, :, 0]
    ImageCorrected_NoGreen[:, :, 2] = ImageCorrected[:, :, 2]
    Corrected_PyramidImages = utils.createImagePyramid(ImageCorrected_NoGreen)

    TrayLocs = []
    PotLocs2 = []
    PotLocs2_ = []
    for trayIndex, Tray_PyramidImages in enumerate(Tray_PyramidImagesList):
        trayH, trayW = Tray_PyramidImages[0].shape[:2]
        SearchRange = [trayW // 6, trayH // 6]
        TrayScore, TrayLoc, TrayAngle = utils.matchTemplatePyramid(
            Corrected_PyramidImages, Tray_PyramidImages, RotationAngle=0,
            EstimatedLocation=trayPositions[trayIndex],
            SearchRange=SearchRange)
        if TrayScore < 0.3:
            print('Low tray matching score. Likely tray %d is missing.'
                  % trayIndex)
            TrayLocs.append(None)
            continue
        TrayLocs.append(TrayLoc)

        # Pots sit on a 4x5 grid inside the tray: estimate each pot
        # centre from the tray corner, then refine by template matching.
        StepX = trayW // 4
        StepY = trayH // 5
        StartX = TrayLoc[0] - trayW // 2 + StepX // 2
        StartY = TrayLoc[1] + trayH // 2 - StepY // 2
        SearchRange = [Pot_PyramidImages[0].shape[1] // 6,
                       Pot_PyramidImages[0].shape[0] // 6]
        print('SearchRange=', SearchRange)
        PotLocs = []
        PotLocs_ = []
        for k in range(4):
            for l in range(5):
                EstimateLoc = [StartX + StepX * k, StartY - StepY * l]
                PotScore, PotLoc, PotAngle = utils.matchTemplatePyramid(
                    Corrected_PyramidImages, Pot_PyramidImages,
                    RotationAngle=0, EstimatedLocation=EstimateLoc,
                    NoLevels=3, SearchRange=SearchRange)
                PotLocs.append(PotLoc)
                PotLocs_.append(EstimateLoc)
        PotLocs2.append(PotLocs)
        PotLocs2_.append(PotLocs_)

    OutputPath = os.path.dirname(OutputFile)
    if not os.path.exists(OutputPath):
        print('Make', OutputPath)
        os.makedirs(OutputPath)
    # Back to B-G-R for cv2.imwrite.
    cv2.imwrite(OutputFile, np.uint8(ImageCorrected[:, :, 2::-1]))
    print('Saved', OutputFile)

    return (OutputFile, meanIntensity, ColorCardScore, ColorCardLoc,
            ColorCorrectionError, TrayLocs)


def main(argv):
    """Command-line entry point; prints HelpString when called without
    options or with -h."""
    HelpString = (
        'correctDistortionAndColor.py -i <image file> '
        '-f <image root folder> -o <output folder>\n'
        'Example:\n'
        "$ ./correctDistortionAndColor.py "
        "-f /mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0018/BVZ0018-GC04L~fullres-orig/ "
        "-k /mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0018/BVZ0018-GC04L~fullres-corr/calib_param_700Dcam.yml "
        "-g /mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0018/BVZ0018-GC04L~fullres-corr/CameraTrax_24ColorCard_2x3in.png "
        "-c /mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0018/BVZ0018-GC04L~fullres-corr/CameraTrax_24ColorCard_2x3inCaptured.png "
        "-o /mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0018/BVZ0018-GC04L~fullres-corr/ -j 16")
    try:
        # The old long-option list embedded '--' prefixes and dropped the
        # trailing '=' on value-taking options, so those never matched.
        opts, args = getopt.getopt(
            argv, "hi:f:c:k:b:g:t:o:j:",
            ["ifile=", "ifolder=", "configfolder=", "calibfile=",
             "captured-colorcard=", "groundtruth-colorcard=",
             "tray-image-pattern=", "ofolder=", "jobs="])
    except getopt.GetoptError:
        print(HelpString)
        sys.exit(2)
    if len(opts) == 0:
        print(HelpString)
        sys.exit()

    ImageFile = ''
    InputRootFolder = ''
    OutputFolder = ''
    InitialGeometryFile = 'ColorcardTrayPotSelections.yml'
    ColorCardTrueFile = 'CameraTrax_24ColorCard_2x3in.png'
    ColorCardCapturedFile = 'Card_%d.png'    # %d = card index
    TrayCapturedFile = 'Tray_%d.png'         # %d = tray index
    PotCapturedFile = 'PotCaptured2.png'
    CalibFile = 'Canon700D_18mm_CalibParam.yml'
    ConfigFolder = ''
    RotationAngle = None
    UndistMapX = None  # stay None when no calibration file is given
    UndistMapY = None
    NoJobs = 1
    for opt, arg in opts:
        if opt == '-h':
            print(HelpString)
            sys.exit()
        elif opt in ("-i", "--ifile"):
            ImageFile = arg
        elif opt in ("-f", "--ifolder"):
            InputRootFolder = arg
        elif opt in ("-c", "--configfolder"):
            ConfigFolder = arg
        elif opt in ("-b", "--captured-colorcard"):
            ColorCardCapturedFile = arg
        elif opt in ("-g", "--groundtruth-colorcard"):
            ColorCardTrueFile = arg
        elif opt in ("-t", "--tray-image-pattern"):
            TrayCapturedFile = arg
        elif opt in ("-k", "--calibfile"):
            CalibFile = arg
        elif opt in ("-o", "--ofolder"):
            OutputFolder = arg
        elif opt in ("-j", "--jobs"):
            NoJobs = int(arg)

    if len(ConfigFolder) > 0:
        # Re-root every auxiliary file inside the config folder.
        ColorCardTrueFile = os.path.join(ConfigFolder, os.path.basename(ColorCardTrueFile))
        ColorCardCapturedFile = os.path.join(ConfigFolder, os.path.basename(ColorCardCapturedFile))
        TrayCapturedFile = os.path.join(ConfigFolder, os.path.basename(TrayCapturedFile))
        PotCapturedFile = os.path.join(ConfigFolder, os.path.basename(PotCapturedFile))
        CalibFile = os.path.join(ConfigFolder, os.path.basename(CalibFile))
        InitialGeometryFile = os.path.join(ConfigFolder, os.path.basename(InitialGeometryFile))

    if len(OutputFolder) > 0 and not os.path.exists(OutputFolder):
        os.makedirs(OutputFolder)

    if len(CalibFile) > 0:
        ImageSize, SquareSize, CameraMatrix, DistCoefs, RVecs, TVecs = \
            utils.readCalibration(CalibFile)
        print('CameraMatrix =', CameraMatrix)
        print('DistCoefs =', DistCoefs)
        UndistMapX, UndistMapY = cv2.initUndistortRectifyMap(
            CameraMatrix, DistCoefs, None, CameraMatrix, ImageSize,
            cv2.CV_32FC1)

    if len(InitialGeometryFile):
        rotationAngle, distortionCorrected, colorcardList, trayList, potList = \
            utils.readGeometries(InitialGeometryFile)
        print('trayList =', trayList)
        RotationAngle = rotationAngle
        colorcardCentre, colorcardWidth, colorcardHeight, colorcardAngle = \
            utils.getRectangleParamters(colorcardList[0])
        colorcardPosition = [int(colorcardCentre[0]), int(colorcardCentre[1])]
        print('colorcardPosition =', colorcardPosition)
        potCentre, potWidth, potHeight, potAngle = \
            utils.getRectangleParamters(potList[0])
        potSize = (int(potWidth), int(potHeight))
        print('potSize =', potSize)
        trayPositions = []
        for tray in trayList:
            trayCentre, trayWidth, trayHeight, trayAngle = \
                utils.getRectangleParamters(tray)
            trayPositions.append([int(trayCentre[0]), int(trayCentre[1])])

    P24ColorCardTrueImage = cv2.imread(ColorCardTrueFile)[:, :, ::-1]
    SquareSize = P24ColorCardTrueImage.shape[0] // 4
    HalfSquareSize = SquareSize // 2

    P24ColorCardCapturedImage = cv2.imread(ColorCardCapturedFile % 0)[:, :, ::-1]
    P24ColorCardCaptured_PyramidImages = \
        utils.createImagePyramid(P24ColorCardCapturedImage)

    Tray_PyramidImagesList = []
    for i in range(8):
        TrayFilename = TrayCapturedFile % i
        TrayImage = cv2.imread(TrayFilename)
        if TrayImage is None:  # `== None` is invalid on numpy arrays
            print('Unable to read', TrayFilename)
            continue
        Tray_PyramidImagesList.append(
            utils.createImagePyramid(TrayImage[:, :, ::-1]))

    PotCapturedImage = cv2.imread(PotCapturedFile)
    # Check BEFORE touching the array (the old code zeroed a channel of a
    # possibly-None result first, and then compared with `== None`).
    if PotCapturedImage is None:
        print('Unable to read', PotCapturedFile)
        return
    PotCapturedImage = PotCapturedImage[:, :, ::-1]
    PotCapturedImage[:, :, 1] = 0  # suppress green, see worker above
    scaleFactor = potSize[1] / PotCapturedImage.shape[0]
    print('scaleFactor', scaleFactor)
    PotCapturedImage = cv2.resize(PotCapturedImage, potSize,
                                  interpolation=cv2.INTER_CUBIC)
    print('PotCapturedImage.shape =', PotCapturedImage.shape)
    Pot_PyramidImages = utils.createImagePyramid(PotCapturedImage)

    # Reference colours: centre pixel of each of the 24 true-card squares.
    Colors = np.zeros([3, 24])
    for i in range(24):
        Row, Col = divmod(i, 6)
        rr = Row * SquareSize + HalfSquareSize
        cc = Col * SquareSize + HalfSquareSize
        Colors[:, i] = P24ColorCardTrueImage[rr, cc, :]
    print('Colors = \n', Colors)

    if len(ImageFile):
        # The old code wrapped this in an extra list, so the loop below
        # iterated once over the whole list instead of per file.
        img_iter = sorted(glob.glob(ImageFile))
    elif len(InputRootFolder):
        img_iter = ts_iter_images(InputRootFolder)
    else:
        print('Need input image or image folder')
        return

    ArgList = []
    for i, ImageFile_ in enumerate(img_iter):
        if i <= 830:
            # Hard-coded resume point from a previous partial run.
            # TODO(review): make this a command-line option.
            continue
        if len(OutputFolder) > 0 and len(InputRootFolder) == 0:
            OutputFile = os.path.join(OutputFolder,
                                      os.path.basename(ImageFile_))
        elif len(OutputFolder) > 0 and len(InputRootFolder) > 0:
            ImagePath = os.path.dirname(ImageFile_)
            ImageName = os.path.basename(ImageFile_)
            OutputPath = os.path.join(OutputFolder,
                                      ImagePath[len(InputRootFolder):])
            OutputFile = os.path.join(OutputPath, ImageName)
        else:
            # Was previously unbound here, causing a NameError below.
            OutputFile = ImageFile_
        ArgList.append([ImageFile_, RotationAngle, UndistMapX, UndistMapY,
                        P24ColorCardCaptured_PyramidImages, colorcardPosition,
                        Colors, Tray_PyramidImagesList, trayPositions,
                        Pot_PyramidImages, OutputFile])

    time1 = time.time()
    Process = Pool(processes=NoJobs)
    Results = Process.map(correctDistortionAndColor, ArgList)
    Process.close()
    Process.join()
    time2 = time.time()

    # Was json.dump(Results, open(path)): open() defaults to read mode,
    # which raised as soon as json tried to write.
    # NOTE(review): assumes the worker returns only JSON-serialisable
    # values (floats/ints/strings/None) -- verify utils' return types.
    with open(os.path.join(OutputFolder, 'Result.json'), 'w') as jsonFile:
        json.dump(Results, jsonFile)
    InfoFile = os.path.join(OutputFolder, 'ColorCorrectionInfo.txt')
    with open(InfoFile, 'w') as myfile:
        myfile.write('It took %0.3f seconds to process %d files using %d processes\n'
                     % (time2 - time1, len(Results), NoJobs))
        myfile.write('ImageFileName; MatchingScore; '
                     'ColorPosition-X(-1.0 for undetected colorbar); '
                     'ColorbarPosition-Y(-1.0 for undetected colorbar); '
                     'CorrectionError(-1.0 for undetected colorbar)\n')
    print('Finished. Saved color correction info to', InfoFile)
if __name__ == "__main__":
    main(sys.argv[1:])


# ---------------------------------------------------------------------------
# cv2yml.py
# ---------------------------------------------------------------------------
"""
Read and write the YAML dialect used by OpenCV's FileStorage.

Supported value types: string, int, float and 2-D numpy arrays
(`!!opencv-matrix` nodes with dt == 'd').

Created on Thu May 15 15:52:40 2014

@author: chuong nguyen, chuong.nguyen@anu.edu.au
"""

import numpy as np


def readValueFromLineYML(line):
    """Split one `name: value` YAML line into (name, value).

    Values that parse as numbers are coerced to int or float; anything
    else (including quoted strings) is returned as the raw string.
    The old version crashed on empty values and on digit-leading
    non-numeric strings such as '1e5' (int() ValueError).
    """
    name = line[:line.index(':')].strip()
    string = line[line.index(':') + 1:].strip()
    value = string
    if string and string[0] in '-+.0123456789':
        try:
            value = int(string)
        except ValueError:
            try:
                value = float(string)
            except ValueError:
                value = string  # e.g. a date or version starting with a digit
    return name, value


def readOpenCVArrayFromYML(myfile):
    """Parse an `!!opencv-matrix` node (rows/cols/dt/data) from *myfile*.

    Returns a 1-D array when cols == 1, otherwise a (rows, cols) array.
    Only dt == 'd' (doubles) is supported; returns None on a malformed
    or unsupported node.
    """
    rname, rows = readValueFromLineYML(myfile.readline().strip())
    cname, cols = readValueFromLineYML(myfile.readline().strip())
    dtname, dtype = readValueFromLineYML(myfile.readline().strip())
    dname, data = readValueFromLineYML(myfile.readline().strip())
    # The old check joined these with `and`, so it could never fire
    # unless every single field was wrong at once.
    if rname != 'rows' or cname != 'cols' or dtname != 'dt' or dname != 'data':
        print('Error reading YML file')
        return None
    if dtype != 'd':
        print('Unsupported data type: dt = ' + dtype)
        return None
    # The data list may wrap over several lines until the closing bracket.
    while ']' not in data:
        data = data + myfile.readline().strip()
    dlist = [float(el)
             for el in data[data.index('[') + 1:data.index(']')].split(',')]
    if cols == 1:
        return np.asarray(dlist)
    return np.asarray(dlist).reshape([rows, cols])


def yml2dic(filename):
    """Load an OpenCV YAML file into a dict of {name: value}.

    Blank lines, comments and the `%YAML` directive are skipped;
    `!!opencv-matrix` nodes are expanded into numpy arrays.
    """
    dicdata = {}
    with open(filename, 'r') as myfile:
        while True:
            line = myfile.readline()
            if not line:
                break
            line = line.strip()
            if len(line) == 0 or line[0] == '#':
                continue
            if ':' in line:
                name, value = readValueFromLineYML(line)
                # An opencv-matrix header means the next lines hold the
                # actual array; hand the file over to the array reader.
                if isinstance(value, str) and 'opencv-matrix' in value:
                    value = readOpenCVArrayFromYML(myfile)
                dicdata[name] = value
    return dicdata


def writeOpenCVArrayToYML(myfile, key, data):
    """Write a 2-D numpy array *data* as an OpenCV `!!opencv-matrix` node."""
    myfile.write(key + ': !!opencv-matrix\n')
    myfile.write('    rows: %d\n' % data.shape[0])
    myfile.write('    cols: %d\n' % data.shape[1])
    myfile.write('    dt: d\n')
    datalist = []
    for i in range(data.shape[0]):
        datalist += [str(num) for num in list(data[i, :])]
    myfile.write('    data: [' + ', '.join(datalist) + ']\n')


def dic2yml(filename, dicdata):
    """Write *dicdata* ({name: value}) to an OpenCV-style YAML file.

    numpy arrays become `!!opencv-matrix` nodes; strings are quoted;
    ints and floats are written verbatim.  Unsupported types are
    reported and skipped.
    """
    with open(filename, 'w') as myfile:
        myfile.write('%YAML:1.0\n')
        for key in dicdata:
            data = dicdata[key]
            if isinstance(data, np.ndarray):
                writeOpenCVArrayToYML(myfile, key, data)
            elif isinstance(data, str):
                myfile.write(key + ': "%s"\n' % data)
            elif isinstance(data, int):
                myfile.write(key + ': %d\n' % data)
            elif isinstance(data, float):
                myfile.write(key + ': %f\n' % data)
            else:
                print('Unsupported data: ', data)
15:08:58" +calibration_RMS: 2.083800 +# physical square size [mm] or distance between control points +# if it equals 1, the physical size is not provided +square_size: 40.000000 +image_width: 5184 +image_height: 3456 +camera_matrix: !!opencv-matrix + rows: 3 + cols: 3 + dt: d + data: [ 4234.949389, 0.000000, 2591.500000, 0.000000, 4234.949389, 1727.500000, 0.000000, 0.000000, 1.000000] +distortion_coefficients: !!opencv-matrix + rows: 5 + cols: 1 + dt: d + data: [ -0.166191, 0.142034, 0.0, 0.0, 0.0] +# rotation vectors of the camera +RVecs: !!opencv-matrix + rows: 20 + cols: 3 + dt: d + data: [ 0.085430, 0.000330, -0.001161, + -0.266227, -0.011043, -0.007287, + -0.475124, 0.024717, -0.009005, + 0.344872, -0.011215, 0.024283, + 0.194615, -0.003308, -0.009305, + 0.074673, -0.629056, -0.013138, + 0.077487, -0.462657, -0.028699, + 0.086771, -0.239532, -0.019715, + 0.080150, -0.376787, -0.011105, + 0.479727, 0.376682, 1.540682, + 0.304966, 0.188570, 1.564530, + 0.288730, 0.441077, 0.044880, + 0.365056, 0.503302, 1.429071, + -0.191630, -0.395442, 0.023342, + 0.307303, -0.480043, -0.025885, + 0.121273, -0.366272, -0.023792, + -0.067700, -0.226939, -0.009199, + 0.087848, 0.333883, 0.263452, + 0.296021, 0.368768, 0.175423, + -0.293886, 0.551607, 0.262630 + ] +# translation vectors of the camera +TVecs: !!opencv-matrix + rows: 20 + cols: 3 + dt: d + data: [ -205.699453, -213.926182, 688.003092, + -238.614185, -242.908236, 735.652240, + -218.770122, -186.041621, 840.593177, + -229.418419, -202.552975, 722.405679, + -201.999230, -176.692873, 669.312545, + -284.672637, -203.279033, 639.711727, + -144.598382, -204.006329, 678.538206, + -173.708721, -214.406611, 657.372486, + -334.321898, -208.888052, 635.888758, + 328.302728, -208.960812, 678.946622, + 188.397398, -208.403866, 697.198283, + -87.388244, -243.134187, 805.139512, + 293.856532, -202.232401, 717.290995, + -368.666577, -221.589373, 758.857238, + -285.236544, -162.062292, 554.225466, + -194.879641, -196.936376, 
629.130402, + -257.102928, -210.509077, 715.222735, + -117.227172, -292.416993, 855.361209, + -89.349303, -265.311681, 841.597319, + 39.414964, -271.661088, 1066.881195 + ] diff --git a/unwarp_rectify/data/PotCaptured.png b/unwarp_rectify/data/PotCaptured.png new file mode 100644 index 0000000..75f809f Binary files /dev/null and b/unwarp_rectify/data/PotCaptured.png differ diff --git a/unwarp_rectify/data/PotEstimatedPositions.yml b/unwarp_rectify/data/PotEstimatedPositions.yml new file mode 100644 index 0000000..8e98793 --- /dev/null +++ b/unwarp_rectify/data/PotEstimatedPositions.yml @@ -0,0 +1,6 @@ +%YAML:1.0 +PotEstimatedPositions: !!opencv-matrix + rows: 20 + cols: 2 + dt: d + data: [0.124131082423, 0.90065681445, 0.124131082423, 0.701149425287, 0.124131082423, 0.501642036125, 0.124131082423, 0.302134646962, 0.124131082423, 0.1026272578, 0.373386295929, 0.90065681445, 0.373386295929, 0.701149425287, 0.373386295929, 0.501642036125, 0.373386295929, 0.302134646962, 0.373386295929, 0.1026272578, 0.622641509434, 0.90065681445, 0.622641509434, 0.701149425287, 0.622641509434, 0.501642036125, 0.622641509434, 0.302134646962, 0.622641509434, 0.1026272578, 0.871896722939, 0.90065681445, 0.871896722939, 0.701149425287, 0.871896722939, 0.501642036125, 0.871896722939, 0.302134646962, 0.871896722939, 0.1026272578] diff --git a/unwarp_rectify/data/PotTemplate.png b/unwarp_rectify/data/PotTemplate.png new file mode 100644 index 0000000..3ad7023 Binary files /dev/null and b/unwarp_rectify/data/PotTemplate.png differ diff --git a/unwarp_rectify/data/Tray01.png b/unwarp_rectify/data/Tray01.png new file mode 100644 index 0000000..1d846e0 Binary files /dev/null and b/unwarp_rectify/data/Tray01.png differ diff --git a/unwarp_rectify/data/Tray02.png b/unwarp_rectify/data/Tray02.png new file mode 100644 index 0000000..2496135 Binary files /dev/null and b/unwarp_rectify/data/Tray02.png differ diff --git a/unwarp_rectify/data/Tray03.png b/unwarp_rectify/data/Tray03.png new file mode 
100644 index 0000000..533ce52 Binary files /dev/null and b/unwarp_rectify/data/Tray03.png differ diff --git a/unwarp_rectify/data/Tray04.png b/unwarp_rectify/data/Tray04.png new file mode 100644 index 0000000..67a9452 Binary files /dev/null and b/unwarp_rectify/data/Tray04.png differ diff --git a/unwarp_rectify/data/Tray05.png b/unwarp_rectify/data/Tray05.png new file mode 100644 index 0000000..20509da Binary files /dev/null and b/unwarp_rectify/data/Tray05.png differ diff --git a/unwarp_rectify/data/Tray06.png b/unwarp_rectify/data/Tray06.png new file mode 100644 index 0000000..d861914 Binary files /dev/null and b/unwarp_rectify/data/Tray06.png differ diff --git a/unwarp_rectify/data/Tray07.png b/unwarp_rectify/data/Tray07.png new file mode 100644 index 0000000..7398358 Binary files /dev/null and b/unwarp_rectify/data/Tray07.png differ diff --git a/unwarp_rectify/data/Tray08.png b/unwarp_rectify/data/Tray08.png new file mode 100644 index 0000000..34ae863 Binary files /dev/null and b/unwarp_rectify/data/Tray08.png differ diff --git a/unwarp_rectify/data/TrayEstimatedPositions.yml b/unwarp_rectify/data/TrayEstimatedPositions.yml new file mode 100644 index 0000000..14672e8 --- /dev/null +++ b/unwarp_rectify/data/TrayEstimatedPositions.yml @@ -0,0 +1,6 @@ +%YAML:1.0 +TrayEstimatedPositions: !!opencv-matrix + rows: 8 + cols: 2 + dt: d + data: [0.178240740741, 0.700810185185, 0.383487654321, 0.697916666667, 0.585648148148, 0.69212962963, 0.790509259259, 0.684606481481, 0.175154320988, 0.331597222222, 0.385030864198, 0.328703703704, 0.582175925926, 0.324652777778, 0.788966049383, 0.315393518519] diff --git a/unwarp_rectify/data/pipeline.yml b/unwarp_rectify/data/pipeline.yml new file mode 100644 index 0000000..60b5968 --- /dev/null +++ b/unwarp_rectify/data/pipeline.yml @@ -0,0 +1,44 @@ +- - undistort + - cameraMatrix: + - [4234.949389, 0.0, 2591.5] + - [0.0, 4234.949389, 1727.5] + - [0.0, 0.0, 1.0] + distortCoefs: [-0.166191, 0.142034, 0.0, 0.0, 0.0] + imageSize: [5184, 
3456] + mess: ---perform undistortion--- + rotationAngle: 180 +- - colorcarddetect + - colorcardFile: CapturedColorcard.png + colorcardPosition: [2690.2459188640732, 1570.372687990673] + colorcardTrueColors: + - [115.0, 196.0, 91.0, 94.0, 129.0, 98.0, 223.0, 58.0, 194.0, 93.0, 162.0, 229.0, + 49.0, 77.0, 173.0, 241.0, 190.0, 0.0, 242.0, 203.0, 162.0, 120.0, 84.0, 50.0] + - [ 83.0, 147.0, 122.0, 108.0, 128.0, 190.0, 124.0, 92.0, 82.0, 60.0, 190.0, 158.0, + 66.0, 153.0, 57.0, 201.0, 85.0, 135.0, 243.0, 203.0, 163.0, 120.0, 84.0, 50.0] + - [ 68.0, 127.0, 155.0, 66.0, 176.0, 168.0, 47.0, 174.0, 96.0, 103.0, 62.0, 41.0, + 147.0, 71.0, 60.0, 25.0, 150.0, 166.0, 245.0, 204.0, 162.0, 120.0, 84.0, 52.0] + mess: ---perform color card detection--- +- - colorcorrect + - {mess: ---perform color correction---} +- - traydetect + - mess: ---perform tray detection--- + trayFiles: Tray_%02d.png + trayNumber: 8 + trayPositions: + - [813.1200732390869, 2457.8882643177812] + - [1967.7154928914415, 2462.615382111313] + - [3116.049471440563, 2459.757543766164] + - [4257.452096609087, 2410.917719211438] + - [795.3079993068732, 1041.2130013401788] + - [1966.9654928914415, 1023.4352507176668] + - [3125.6687508889454, 1023.55453016605] + - [4310.280480060579, 1010.3159712692838] +- - potdetect + - mess: ---perform pot detection--- + potFile: Pot.png + potTemplateFile: PotTemplate.png + potPosition: [2699.7610189795187, 1940.5169498354533] + potSize: [262, 262] + traySize: [1125, 1368] +- - plantextract + - {mess: ---extract plant biometrics---} diff --git a/unwarp_rectify/detectColorBarPosition.py b/unwarp_rectify/detectColorBarPosition.py new file mode 100644 index 0000000..30d1537 --- /dev/null +++ b/unwarp_rectify/detectColorBarPosition.py @@ -0,0 +1,193 @@ +# -*- coding: utf-8 -*- +""" +Created on Wed Jun 4 12:13:04 2014 + +@author: chuong +""" +from __future__ import absolute_import, division, print_function + +import numpy as np +from matplotlib import pyplot as plt +import getopt, 
# ---------------------------------------------------------------------------
# detectColorBarPosition.py -- helper functions and driver.
# NOTE(review): relies on numpy (np), cv2, matplotlib (plt), cv2yml and
# timestream being imported at the top of this module.
# ---------------------------------------------------------------------------

def getRectangleParamters(Rect):
    """Return (Centre, Width, Height, Angle) fitted to the quad *Rect*.

    Rect is [top-left, bottom-left, bottom-right, top-right], each an
    (x, y) pair.  Width/Height/Angle are averaged over opposite edges so
    a slightly non-rectangular quad still yields a sensible fit.
    (The name keeps the original spelling for caller compatibility.)
    """
    tl = np.asarray(Rect[0])
    bl = np.asarray(Rect[1])
    br = np.asarray(Rect[2])
    tr = np.asarray(Rect[3])

    Centre = (tl + bl + br + tr) / 4.0
    Width = (np.linalg.norm(tr - tl) + np.linalg.norm(br - bl)) / 2.0
    Height = (np.linalg.norm(bl - tl) + np.linalg.norm(br - tr)) / 2.0
    # Average the angles of the two horizontal and two vertical edges
    # (the y axis points down in image coordinates, hence the sign flips).
    Angle = (np.arctan2(-(tr[1] - tl[1]), tr[0] - tl[0]) +
             np.arctan2(-(br[1] - bl[1]), br[0] - bl[0]) +
             np.arctan2(bl[0] - tl[0], bl[1] - tl[1]) +
             np.arctan2(br[0] - tr[0], br[1] - tr[1])) / 4
    return Centre, Width, Height, Angle


def createMap(Centre, Width, Height, Angle):
    """Build cv2.remap-compatible float32 sampling maps for a
    Width x Height window rotated by *Angle* radians about *Centre*."""
    MapX, MapY = np.meshgrid(np.arange(Width), np.arange(Height))
    MapX = MapX - Width / 2.0
    MapY = MapY - Height / 2.0
    MapX2 = MapX * np.cos(Angle) + MapY * np.sin(Angle) + Centre[0]
    MapY2 = -MapX * np.sin(Angle) + MapY * np.cos(Angle) + Centre[1]
    return MapX2.astype(np.float32), MapY2.astype(np.float32)


def rotateImage(Image, RotationAngle=0.0):
    """Rotate *Image* counter-clockwise by *RotationAngle* degrees.

    Multiples of 90 take the lossless np.rot90 path; any other angle uses
    an affine warp that crops to the original frame.

    Fixes two bugs in the original: the 90-degree path applied one extra
    rot90 (0 -> 90, 180 -> 270), and cv2.warpAffine was given shape[0:2]
    (height, width) where it expects dsize as (width, height).
    """
    Image_ = Image
    if RotationAngle % 90.0 == 0:
        k = int(RotationAngle // 90.0)
        if k % 4 != 0:
            Image_ = np.rot90(Image_, k)
    elif RotationAngle != 0:
        # (width, height) ordering for both the centre and the dsize.
        center = tuple(np.array(Image_.shape[1::-1]) / 2.0)
        rot_mat = cv2.getRotationMatrix2D(center, RotationAngle, 1.0)
        Image_ = cv2.warpAffine(Image_, rot_mat, Image_.shape[1::-1],
                                flags=cv2.INTER_LINEAR)
    return Image_


def matchTemplate(Image, Template, SearchTopLeftCorner, SearchBottomRightCorner):
    """Correlate *Template* inside the given search window of *Image*.

    Returns (matchedLocImage, maxVal, maxLoc, corrMap) where
    matchedLocImage is the template-centre position in full-image
    coordinates and maxVal the TM_CCOEFF_NORMED score in [-1, 1].
    """
    CropedImage = Image[SearchTopLeftCorner[1]:SearchBottomRightCorner[1],
                        SearchTopLeftCorner[0]:SearchBottomRightCorner[0]]
    corrMap = cv2.matchTemplate(CropedImage.astype(np.uint8),
                                Template.astype(np.uint8),
                                cv2.TM_CCOEFF_NORMED)
    _, maxVal, _, maxLoc = cv2.minMaxLoc(corrMap)
    # maxLoc is the template's top-left corner inside the cropped window;
    # shift to the template centre, then back to full-image coordinates.
    matchedLocImageCropped = (maxLoc[0] + Template.shape[1] // 2,
                              maxLoc[1] + Template.shape[0] // 2)
    matchedLocImage = (matchedLocImageCropped[0] + SearchTopLeftCorner[0],
                       matchedLocImageCropped[1] + SearchTopLeftCorner[1])
    return matchedLocImage, maxVal, maxLoc, corrMap


def findColorbarPyramid(Image, Colorbar, RotationAngle=None, SearchRange=0.5,
                        NoLevels=5, FinalLevel=1):
    """Coarse-to-fine template search for the colour bar.

    Builds *NoLevels* pyramid levels, matches at the coarsest level over
    *SearchRange* (fraction of the image; values outside (0, 1] search the
    whole frame), optionally testing a 180-degree flip when RotationAngle
    is None, then refines the location down to *FinalLevel*.

    Returns (maxVal, matchedLocImage0, RotationAngle) with the location
    in level-0 (full resolution) coordinates.
    """
    PyramidImages = [Image]
    PyramidColorbars = [Colorbar]
    for i in range(1, NoLevels):
        PyramidImages.append(cv2.pyrDown(PyramidImages[i - 1]))
        PyramidColorbars.append(cv2.pyrDown(PyramidColorbars[i - 1]))

    for i in range(NoLevels - 1, -1, -1):
        if i == NoLevels - 1:
            # Coarsest level: search around the image centre.
            maxLocEst = [PyramidImages[i].shape[1] // 2,
                         PyramidImages[i].shape[0] // 2]
            if 0 < SearchRange <= 1.0:
                # int(): a fractional SearchRange otherwise yields float
                # slice indices, which numpy rejects.
                CroppedHalfWidth = int(SearchRange * PyramidImages[i].shape[1]) // 2
                CroppedHalfHeight = int(SearchRange * PyramidImages[i].shape[0]) // 2
            else:
                CroppedHalfWidth = PyramidImages[i].shape[1] // 2
                CroppedHalfHeight = PyramidImages[i].shape[0] // 2
            SearchTopLeftCorner = [maxLocEst[0] - CroppedHalfWidth,
                                   maxLocEst[1] - CroppedHalfHeight]
            SearchBottomRightCorner = [maxLocEst[0] + CroppedHalfWidth,
                                       maxLocEst[1] + CroppedHalfHeight]

            matchedLocImage, maxVal, maxLoc, corrMap = matchTemplate(
                PyramidImages[i], PyramidColorbars[i],
                SearchTopLeftCorner, SearchBottomRightCorner)

            if RotationAngle is None:
                # Orientation unknown: also try the frame rotated 180 deg
                # and keep whichever correlates better.
                matchedLocImage180, maxVal180, maxLoc180, corrMap180 = matchTemplate(
                    np.rot90(PyramidImages[i], 2).astype(np.uint8),
                    PyramidColorbars[i],
                    SearchTopLeftCorner, SearchBottomRightCorner)
                print('maxVal, maxVal180', maxVal, maxVal180)
                if maxVal < 0.3 and maxVal180 < 0.3:
                    # Both orientations score poorly: very likely no bar.
                    print('#### Cannot find a colorbar ####')
                if maxVal < maxVal180:
                    PyramidImages = [np.rot90(Img, 2) for Img in PyramidImages]
                    matchedLocImage, matchedLocImage180 = matchedLocImage180, matchedLocImage
                    maxVal, maxVal180 = maxVal180, maxVal
                    maxLoc, maxLoc180 = maxLoc180, maxLoc
                    corrMap, corrMap180 = corrMap180, corrMap
                    RotationAngle = 180
                else:
                    RotationAngle = 0
            # Rescale to a location in the level-0 image.
            matchedLocImage0 = (matchedLocImage[0] * 2**i,
                                matchedLocImage[1] * 2**i)
        else:
            # Finer level: refine around the location found one level up.
            maxLocEst = (matchedLocImage0[0] // 2**i, matchedLocImage0[1] // 2**i)
            searchRange = (6, 6)
            CroppedHalfWidth = PyramidColorbars[i].shape[1] // 2 + searchRange[1] // 2
            CroppedHalfHeight = PyramidColorbars[i].shape[0] // 2 + searchRange[0] // 2
            SearchTopLeftCorner = [maxLocEst[0] - CroppedHalfWidth,
                                   maxLocEst[1] - CroppedHalfHeight]
            SearchBottomRightCorner = [maxLocEst[0] + CroppedHalfWidth,
                                       maxLocEst[1] + CroppedHalfHeight]
            matchedLocImage, maxVal, maxLoc, corrMap = matchTemplate(
                PyramidImages[i], PyramidColorbars[i],
                SearchTopLeftCorner, SearchBottomRightCorner)
            matchedLocImage0 = (matchedLocImage[0] * 2**i,
                                matchedLocImage[1] * 2**i)

        if i == FinalLevel:
            break  # skip the remaining fine levels to save time

    print('maxVal, maxLocImage, RotationAngle =',
          maxVal, matchedLocImage0, RotationAngle)
    return maxVal, matchedLocImage0, RotationAngle


def _main():
    """Original module-level driver (hard-coded development paths), kept
    behind a guard so the helpers above import without side effects."""
    import cv2yml
    from timestream.parse import ts_iter_images

    RectData = cv2yml.yml2dic('/home/chuong/Data/ColorbarPositions/ColorbarRectangle.yml')
    RotationAngle = RectData['RotationAngle']
    Rect = RectData['Colorbar'].tolist()
    print('Rect =', Rect)
    Centre, Width, Height, Angle = getRectangleParamters(Rect)
    print(Centre, Width, Height, Angle)
    ColCardMapX, ColCardMapY = createMap(Centre, Width, Height, Angle)

    P24ColorCard = cv2.imread('/home/chuong/Data/ColorbarPositions/CameraTrax_24ColorCard_2x3in.png')[:, :, ::-1]
    SquareSize = P24ColorCard.shape[0] // 4
    HalfSquareSize = SquareSize // 2

    P24ColorCardCaptured = cv2.imread('/home/chuong/Data/ColorbarPositions/CameraTrax_24ColorCard_2x3inCaptured.png')[:, :, ::-1]
    SquareSizeCaptured = P24ColorCardCaptured.shape[0] // 4
    HalfSquareSizeCaptured = SquareSizeCaptured // 2

    img_iter = ts_iter_images('/home/chuong/Data/ColorbarPositions')
    for ImageFile in img_iter:
        Image = cv2.imread(ImageFile)[:, :, ::-1]
        print(ImageFile)
        if Image.shape[0] > Image.shape[1]:
            # Portrait frame: bring to landscape before matching.
            RotationAngle = 90
            Image = rotateImage(Image, RotationAngle)
        maxVal, maxLoc, RotationAngle2 = findColorbarPyramid(
            Image, P24ColorCardCaptured, NoLevels=5, FinalLevel=3)
        if maxVal is None:
            continue
        RotationAngle = RotationAngle + RotationAngle2


if __name__ == '__main__':
    _main()
matplotlib import pyplot as plt +import getopt, sys, os +import cv2 +import cv2yml +import glob +from scipy import optimize +from timestream.parse import ts_iter_images +from multiprocessing import Pool + +global isShow +isShow= False + + +def matchTemplateLocation(Image, Template, EstimatedLocation, SearchRange = [0.5, 0.5], RangeInImage = True): + if RangeInImage: # use image size + Width = Image.shape[1] + Height = Image.shape[0] + else: # use template size + Width = Template.shape[1] + Height = Template.shape[0] + + if SearchRange == None: # search throughout the whole images + CroppedHalfWidth = Width//2 + CroppedHalfHeight = Height//2 + elif SearchRange[0] <= 1.0 and SearchRange[1] <= 1.0: # in fraction values + CroppedHalfWidth = (Template.shape[1]+SearchRange[0]*Width)//2 + CroppedHalfHeight = (Template.shape[0]+SearchRange[1]*Height)//2 + else: # in pixels values + CroppedHalfWidth = (Template.shape[1]+SearchRange[0])//2 + CroppedHalfHeight = (Template.shape[0]+SearchRange[1])//2 + + if CroppedHalfWidth > Image.shape[1]//2-1: + CroppedHalfWidth = Image.shape[1]//2-1 + if CroppedHalfHeight > Image.shape[0]//2-1: + CroppedHalfHeight = Image.shape[0]//2-1 + + SearchTopLeftCorner = [EstimatedLocation[0]-CroppedHalfWidth, EstimatedLocation[1]-CroppedHalfHeight] + SearchBottomRightCorner = [EstimatedLocation[0]+CroppedHalfWidth, EstimatedLocation[1]+CroppedHalfHeight] + + return matchTemplateWindow(Image, Template, SearchTopLeftCorner, SearchBottomRightCorner) + +def matchTemplateWindow(Image, Template, SearchTopLeftCorner, SearchBottomRightCorner): + CropedImage = Image[SearchTopLeftCorner[1]:SearchBottomRightCorner[1], SearchTopLeftCorner[0]:SearchBottomRightCorner[0]] + + if isShow: + plt.figure() + plt.imshow(Template) + plt.figure() + plt.imshow(CropedImage) + plt.show() + + corrMap = cv2.matchTemplate(CropedImage.astype(np.uint8), Template.astype(np.uint8), cv2.TM_CCOEFF_NORMED) + _, maxVal, _, maxLoc = cv2.minMaxLoc(corrMap) + # recalculate max position 
in cropped image space + matchedLocImageCropped = (maxLoc[0] + Template.shape[1]//2, + maxLoc[1] + Template.shape[0]//2) + # recalculate max position in full image space + matchedLocImage = (matchedLocImageCropped[0] + SearchTopLeftCorner[0], \ + matchedLocImageCropped[1] + SearchTopLeftCorner[1]) + if isShow: + plt.figure() + plt.imshow(corrMap) + plt.hold(True) + plt.plot([maxLoc[0]], [maxLoc[1]], 'o') + plt.figure() + plt.imshow(CropedImage) + plt.hold(True) + plt.plot([matchedLocImageCropped[0]], [matchedLocImageCropped[1]], 'o') + plt.figure() + plt.imshow(Image) + plt.hold(True) + plt.plot([matchedLocImage[0]], [matchedLocImage[1]], 'o') + plt.show() + + return matchedLocImage, maxVal, maxLoc, corrMap + +def createImagePyramid(Image, NoLevels = 5): + for i in range(NoLevels): + if i == 0: + PyramidImages = [Image.astype(np.uint8)] + else: + PyramidImages.append(cv2.pyrDown(PyramidImages[i-1]).astype(np.uint8)) + return PyramidImages + +def matchTemplatePyramid(PyramidImages, PyramidTemplates, RotationAngle = None, \ + EstimatedLocation = None, SearchRange = None, NoLevels = 4, FinalLevel = 1): + for i in range(NoLevels-1, -1, -1): + if i == NoLevels-1: + if EstimatedLocation == None: + maxLocEst = [PyramidImages[i].shape[1]//2, PyramidImages[i].shape[0]//2] # image center + else: + maxLocEst = [EstimatedLocation[0]//2**i, EstimatedLocation[1]//2**i] # scale position to the pyramid level + + if SearchRange[0] > 1.0 and SearchRange[1] > 1.0: + SearchRange2 = [SearchRange[0]//2**i, SearchRange[1]//2**i] + else: + SearchRange2 = SearchRange + + matchedLocImage, maxVal, maxLoc, corrMap = matchTemplateLocation(PyramidImages[i], PyramidTemplates[i], maxLocEst, SearchRange = SearchRange2) + if RotationAngle == None: + matchedLocImage180, maxVal180, maxLoc180, corrMap180 = matchTemplateLocation(np.rot90(PyramidImages[i],2).astype(np.uint8), PyramidTemplates[i], maxLocEst, SearchRange) + if maxVal < 0.3 and maxVal180 < 0.3: + print('#### Warning: low matching score 
####') +# return None, None, None + if maxVal < maxVal180: + PyramidImages = [np.rot90(Img,2) for Img in PyramidImages] + matchedLocImage, matchedLocImage180 = matchedLocImage180, matchedLocImage + maxVal, maxVal180 = maxVal180, maxVal + maxLoc, maxLoc180 = maxLoc180, maxLoc + corrMap, corrMap180 = corrMap180, corrMap + RotationAngle = 180 + else: + RotationAngle = 0 + # rescale to location in level-0 image + matchedLocImage0 = (matchedLocImage[0]*2**i, matchedLocImage[1]*2**i) + else: + maxLocEst = (matchedLocImage0[0]//2**i, matchedLocImage0[1]//2**i) + searchRange = [6,6] + + matchedLocImage, maxVal, maxLoc, corrMap = matchTemplateLocation(PyramidImages[i], PyramidTemplates[i], maxLocEst, searchRange) + # rescale to location in level-0 image + matchedLocImage0 = (matchedLocImage[0]*2**i, matchedLocImage[1]*2**i) + +# plt.figure() +# plt.imshow(corrMap) +# plt.hold(True) +# plt.plot([maxLoc[0]], [maxLoc[1]], 'o') +# plt.title('maxVal = %f' %maxVal) +# +# plt.figure() +# plt.imshow(PyramidImages[i]) +# plt.hold(True) +# plt.plot([matchedLocImage[0]], [matchedLocImage[1]], 'o') +# plt.title('Level = %d, RotationAngle = %f' %(i, RotationAngle)) +# plt.show() + + if i == FinalLevel: + # Skip early to save time + break + +# print('maxVal, maxLocImage, RotationAngle =', maxVal, matchedLocImage0, RotationAngle) + return maxVal, matchedLocImage0, RotationAngle + +def detectTraysAndPots(Arg): + ImageFile_, Tray_PyramidImagesList, TrayEstimatedPositions, \ + Pot_PyramidImages, PotEstimatedPositions, OutputFile = Arg + print('Process ', ImageFile_) + Image = cv2.imread(ImageFile_)[:,:,::-1] # read and convert to R-G-B image + PyramidImages = createImagePyramid(Image) + + TrayLocs = [] + PotLocs2 = [] + PotLocs2_ = [] + PotIndex = 0 + for i,Tray_PyramidImages in enumerate(Tray_PyramidImagesList): + if TrayEstimatedPositions != None: + EstimateLoc = [TrayEstimatedPositions[i][0]*PyramidImages[0].shape[1], + TrayEstimatedPositions[i][1]*PyramidImages[0].shape[0]] + TrayScore, 
TrayLoc, TrayAngle = matchTemplatePyramid(PyramidImages, Tray_PyramidImages, \ + RotationAngle = 0, EstimatedLocation = EstimateLoc, \ + SearchRange = [Tray_PyramidImages[0].shape[1]//2, Tray_PyramidImages[0].shape[0]//2]) + else: + TrayScore, TrayLoc, TrayAngle = matchTemplatePyramid(PyramidImages, Tray_PyramidImages, RotationAngle = 0, SearchRange = [1.0, 1.0]) + TrayLocs.append(TrayLoc) + + PotLocs = [] + PotLocs_ = [] + SearchRange = [Pot_PyramidImages[0].shape[1]//6, Pot_PyramidImages[0].shape[0]//6] + if PotEstimatedPositions != None: + for PotEstimatedPosition in PotEstimatedPositions: + EstimateLoc = [TrayLoc[0] - Tray_PyramidImages[0].shape[1]//2 + PotEstimatedPosition[0]*Tray_PyramidImages[0].shape[1], \ + TrayLoc[1] - Tray_PyramidImages[0].shape[0]//2 + PotEstimatedPosition[1]*Tray_PyramidImages[0].shape[0]] + + PotScore, PotLoc, PotAngle = matchTemplatePyramid(PyramidImages, \ + Pot_PyramidImages, RotationAngle = 0, \ + EstimatedLocation = EstimateLoc, NoLevels = 3, SearchRange = SearchRange) + PotLocs.append(PotLoc) + PotLocs_.append(EstimateLoc) + else: + StepX = Tray_PyramidImages[0].shape[1]//4 + StepY = Tray_PyramidImages[0].shape[0]//5 + StartX = TrayLoc[0] - Tray_PyramidImages[0].shape[1]//2 + StepX//2 + StartY = TrayLoc[1] + Tray_PyramidImages[0].shape[0]//2 - StepY//2 + # assuming 5x4 pots per tray + for k in range(4): + for l in range(5): + EstimateLoc = [StartX + StepX*k, StartY - StepY*l] + PotScore, PotLoc, PotAngle = matchTemplatePyramid(PyramidImages, \ + Pot_PyramidImages, RotationAngle = 0, \ + EstimatedLocation = EstimateLoc, NoLevels = 3, SearchRange = SearchRange) + PotLocs.append(PotLoc) + PotLocs_.append(EstimateLoc) + PotIndex = PotIndex + 1 + PotLocs2.append(PotLocs) + PotLocs2_.append(PotLocs_) + +# plt.figure() +# plt.imshow(Pot_PyramidImages[0]) +# plt.figure() +# plt.imshow(PyramidImages[0]) +# plt.hold(True) +# PotIndex = 0 +# for i,Loc in enumerate(TrayLocs): +# plt.plot([Loc[0]], [Loc[1]], 'bo') +# plt.text(Loc[0], 
Loc[1]-15, 'T'+str(i+1), color='blue', fontsize=20) +# for PotLoc,PotLoc_ in zip(PotLocs2[i], PotLocs2_[i]): +# plt.plot([PotLoc[0]], [PotLoc[1]], 'ro') +# plt.text(PotLoc[0], PotLoc[1]-15, str(PotIndex+1), color='red') +# plt.plot([PotLoc_[0]], [PotLoc_[1]], 'rx') +# PotIndex = PotIndex + 1 +# +# plt.title(os.path.basename(ImageFile_)) +# plt.show() + +# OutputPath = os.path.dirname(OutputFile) +# if not os.path.exists(OutputPath): +# print('Make', OutputPath) +# os.makedirs(OutputPath) +# cv2.imwrite(OutputFile, np.uint8(ImageCorrected[:,:,2::-1])) # convert to B-G-R image and save +# print('Saved', OutputFile) + return TrayLocs, PotLocs + + +def main(argv): + HelpString = 'correctDistortionAndColor.py -i ' + \ + '-f '+ \ + '-o \n' + \ + 'Example:\n' + \ + "$ ./detectTraysAndPots.py -f /mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0018/BVZ0018-GC04L~fullres-corr/ -k /mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0018/BVZ0018-GC04L~fullres-corr/calib_param_700Dcam.yml -g /mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0018/BVZ0018-GC04L~fullres-corr/CameraTrax_24ColorCard_2x3in.png -c /mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0018/BVZ0018-GC04L~fullres-corr/CameraTrax_24ColorCard_2x3inCaptured.png -o /mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0018/BVZ0018-GC04L~fullres-corr/ -j 16" + try: + opts, args = getopt.getopt(argv,"hi:f:c:r:p:t:o:j:",\ + ["ifile=","ifolder=","--configfolder","--tray-estimated-positions=",\ + "--pot-estimated-positions=","--tray-image-pattern","ofolder=","jobs="]) + except getopt.GetoptError: + print(HelpString) + sys.exit(2) + if len(opts) == 0: + print(HelpString) + sys.exit() + + ImageFile = '' + InputRootFolder = '' + OutputFolder = '' + PotPositionFile = 'PotEstimatedPositions.yml' + TrayPositionFile = 'TrayEstimatedPositions.yml' + TrayCapturedFile = 'Tray%02d.png' + PotCapturedFile = 'PotCaptured2.png' #'PotCaptured.png' #'PotCaptured4.png' #'PotCaptured3.png' # + CalibFile = 'Canon700D_18mm_CalibParam.yml' + 
ConfigFolder = '' + NoJobs = 1 + for opt, arg in opts: + if opt == '-h': + print(HelpString) + sys.exit() + elif opt in ("-i", "--ifile"): + ImageFile = arg + elif opt in ("-f", "--ifolder"): + InputRootFolder = arg + elif opt in ("-c", "--configfolder"): + ConfigFolder = arg + elif opt in ("-r", "--tray-estimated-positions"): + TrayPositionFile = arg + elif opt in ("-p", "--pot-estimated-positions"): + PotPositionFile = arg + elif opt in ("-t", "--tray-image-pattern"): + TrayCapturedFile = arg + elif opt in ("-o", "--ofolder"): + OutputFolder = arg + elif opt in ("-j", "--jobs"): + NoJobs = int(arg) + + if len(ConfigFolder) > 0: + PotPositionFile = os.path.join(ConfigFolder, os.path.basename(PotPositionFile)) + TrayPositionFile = os.path.join(ConfigFolder, os.path.basename(TrayPositionFile)) + TrayCapturedFile = os.path.join(ConfigFolder, os.path.basename(TrayCapturedFile)) + PotCapturedFile = os.path.join(ConfigFolder, os.path.basename(PotCapturedFile)) + CalibFile = os.path.join(ConfigFolder, os.path.basename(CalibFile)) + + if len(OutputFolder) > 0 and not os.path.exists(OutputFolder): + os.makedirs(OutputFolder) + + try: + PotEstimatedPositions = cv2yml.yml2dic(PotPositionFile)['PotEstimatedPositions'].tolist() + TrayEstimatedPositions = cv2yml.yml2dic(TrayPositionFile)['TrayEstimatedPositions'].tolist() + print('Found estimated positions of trays and pots.') + except: + print('Estimated positions of trays and pots are not provided.\n Try without this information.') + PotEstimatedPositions = None + TrayEstimatedPositions = None + + Tray_PyramidImagesList = [] + for i in range(8): + TrayFilename = TrayCapturedFile %(i+1) + TrayImage = cv2.imread(TrayFilename)[:,:,::-1] + if TrayImage == None: + print('Unable to read', TrayFilename) + Tray_PyramidImages = createImagePyramid(TrayImage) + Tray_PyramidImagesList.append(Tray_PyramidImages) + + PotCapturedImage = cv2.imread(PotCapturedFile)[:,:,::-1] # read and convert to R-G-B image + if PotCapturedImage == None: + 
print('Unable to read', TrayFilename) + Pot_PyramidImages = createImagePyramid(PotCapturedImage) + + if len(ImageFile): + img_iter = [sorted(glob.glob(ImageFile))] + elif len(InputRootFolder): + img_iter = ts_iter_images(InputRootFolder) + else: + print('Need imput image for image folder') + return + + ArgList = [] + for i,ImageFile_ in enumerate(img_iter): + if len(OutputFolder) > 0 and len(InputRootFolder) == 0: + OutputFile = os.path.join(OutputFolder, os.path.basename(ImageFile_)) + elif len(OutputFolder) > 0 and len(InputRootFolder) > 0: + ImagePath = os.path.dirname(ImageFile_) + ImageName = os.path.basename(ImageFile_) + OutputPath = os.path.join(OutputFolder, ImagePath[len(InputRootFolder):]) + OutputFile = os.path.join(OutputPath, ImageName) + ArgList.append([ImageFile_, Tray_PyramidImagesList, TrayEstimatedPositions, \ + Pot_PyramidImages, PotEstimatedPositions, OutputFile]) +# if i == 10: +# break + + Process = Pool(processes = NoJobs) + import time + time1 = time.time() + + Results = Process.map(detectTraysAndPots, ArgList) +# Results = [] +# for Arg in ArgList: +# TrayLocs, PotLocs = detectTraysAndPots(Arg) +# Results.append([TrayLocs, PotLocs]) + + time2 = time.time() + InfoFile = os.path.join(OutputFolder, 'TrayPotDetectionInfo.txt') + with open(InfoFile, 'w') as myfile: + myfile.write('It took %0.3f seconds to process %d files using %d processes\n' % (time2-time1, len(Results), NoJobs)) + myfile.write('ImageFileName; 5 Tray x-y positions; 20 Pot x-y positions)\n') + for Result,Arg in zip(Results, ArgList): + myfile.write('%s; ' %Arg[0]) + for TrayPosition in Result[0]: + myfile.write('%d; %d ' %(TrayPosition[0], TrayPosition[1])) + for PotPosition in Result[1]: + myfile.write('%d; %d ' %(PotPosition[0], PotPosition[1])) + myfile.write('\n') + + print('Finished. 
Saved color correction info to', InfoFile) +if __name__ == "__main__": + main(sys.argv[1:]) + + diff --git a/unwarp_rectify/estimateColorCorrection.py b/unwarp_rectify/estimateColorCorrection.py new file mode 100644 index 0000000..d9a6042 --- /dev/null +++ b/unwarp_rectify/estimateColorCorrection.py @@ -0,0 +1,452 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue May 13 16:17:53 2014 + +@author: chuong +""" +from __future__ import absolute_import, division, print_function + +import numpy as np +from matplotlib import pyplot as plt +from matplotlib.widgets import Cursor +import getopt, sys, os, datetime +import cv2 +import cv2yml +import glob +from scipy import optimize + +class RectangleBuilder: + def __init__(self, Rectangle, AspectRatio, Image): + self.Rectangle = Rectangle + self.AspectRatio = AspectRatio + self.Image = Image + self.RectList = [] + self.cid = Rectangle.figure.canvas.mpl_connect('button_press_event', self.onMouseClicked) + self.cid = Rectangle.figure.canvas.mpl_connect('key_press_event', self.onKeyPressed) + self.lclick_x = [] + self.lclick_y = [] + + def onKeyPressed(self, event): + if event.key == 'escape': + self.lclick_x = [] + self.lclick_y = [] + print(' Clear recent selection') + elif event.key == 'up': + self.lclick_y[-1] = self.lclick_y[-1] - 5 + elif event.key == 'down': + self.lclick_y[-1] = self.lclick_y[-1] + 5 + elif event.key == 'right': + self.lclick_x[-1] = self.lclick_x[-1] + 5 + elif event.key == 'left': + self.lclick_x[-1] = self.lclick_x[-1] - 5 + self.drawLines() + + def onMouseClicked(self, event): +# print('click', event) + if event.inaxes!=self.Rectangle.axes: + return + if event.button == 1: + self.lclick_x.append(event.xdata) + self.lclick_y.append(event.ydata) + elif event.button == 3: + self.rclick_x = event.xdata + self.rclick_y = event.ydata + # remove the last selection + if len(self.lclick_x) > 0: + self.lclick_x = self.lclick_x[:-1] + self.lclick_y = self.lclick_y[:-1] + elif len(self.RectList) > 0: + 
self.RectList = self.RectList[:-1] + + if self.AspectRatio != None and len(self.lclick_x) == 2: + Rect = self.getRectCornersFrom2Points(self.lclick_x, self.lclick_y) + self.RectList.append(Rect) + self.lclick_x = [] + self.lclick_y = [] + elif len(self.lclick_x) == 4: + Rect = [[x,y] for x,y in zip(self.lclick_x, self.lclick_y)] + Rect = self.correctPointOrder(Rect) + self.RectList.append(Rect) + self.lclick_x = [] + self.lclick_y = [] + self.drawLines() + + def findCorner(self, Corner, CornerType = 'topleft', WindowSize = 100, Threshold = 50): + x, y = Corner + HWindowSize = int(WindowSize/2) + window = self.Image[y-HWindowSize:y+HWindowSize+1, x-HWindowSize:x+HWindowSize+1,:].astype(np.float) +# cv2.imwrite('/home/chuong/Data/GC03L-temp/corrected/'+CornerType+'.jpg', window) + foundLeftEdgeX = False + foundRightEdgeX = False + foundTopEdgeY = False + foundBottomEdgeY = False + for i in range(HWindowSize+1): + diff0 = np.sum(np.abs(window[HWindowSize, HWindowSize-i,:] - window[HWindowSize, HWindowSize,:])) + diff1 = np.sum(np.abs(window[HWindowSize, HWindowSize+i,:] - window[HWindowSize, HWindowSize,:])) + diff2 = np.sum(np.abs(window[HWindowSize-i, HWindowSize,:] - window[HWindowSize, HWindowSize,:])) + diff3 = np.sum(np.abs(window[HWindowSize+i, HWindowSize,:] - window[HWindowSize, HWindowSize,:])) + if diff0 > Threshold and not foundLeftEdgeX: + xLeftNew = x-i + foundLeftEdgeX = True + elif diff1 > Threshold and not foundRightEdgeX: + xRightNew = x+i + foundRightEdgeX = True + if diff2 > Threshold and not foundTopEdgeY: + yTopNew = y-i + foundTopEdgeY = True + elif diff3 > Threshold and not foundBottomEdgeY: + yBottomNew = y+i + foundBottomEdgeY = True + + if CornerType.lower() == 'topleft' and foundLeftEdgeX and foundTopEdgeY: + return [xLeftNew, yTopNew] + elif CornerType.lower() == 'bottomleft' and foundLeftEdgeX and foundBottomEdgeY: + return [xLeftNew, yBottomNew] + elif CornerType.lower() == 'bottomright' and foundRightEdgeX and foundBottomEdgeY: + return 
[xRightNew, yBottomNew] + elif CornerType.lower() == 'topright' and foundRightEdgeX and foundTopEdgeY: + return [xRightNew, yTopNew] + else: + print('Cannot detect corner ' + CornerType) + return [x, y] + + def drawLines(self): + xs, ys = [], [] + for Rect in self.RectList: + tl, bl, br, tr = Rect + xs = xs + [tl[0], bl[0], br[0], tr[0], tl[0], np.nan] + ys = ys + [tl[1], bl[1], br[1], tr[1], tl[1], np.nan] + if len(self.lclick_x) > 1: + xs = xs + [x for x in self.lclick_x] + ys = ys + [y for y in self.lclick_y] + self.Rectangle.set_data(xs, ys) + self.Rectangle.figure.canvas.draw() + + def getRectCornersFrom2Points(self, lclick_x, lclick_y): + Lenght = np.sqrt((lclick_x[0] - lclick_x[1])**2 + \ + (lclick_y[0] - lclick_y[1])**2) + Height = Lenght/np.sqrt(1+self.AspectRatio**2) + Width = Height*self.AspectRatio + Centre = np.asarray([lclick_x[0] + lclick_x[1], lclick_y[0] + lclick_y[1]])/2.0 + Angle = np.arctan2(Height, Width) - \ + np.arctan2(lclick_y[1] - lclick_y[0], lclick_x[1] - lclick_x[0]) + InitRect = self.createRectangle(Centre, Width, Height, Angle) + CornerTypes = ['topleft', 'bottomleft', 'bottomright', 'topright'] + Rect = [] + for Corner, Type in zip(InitRect, CornerTypes): + Corner = self.findCorner(Corner, Type) + Rect.append(Corner) + return Rect + + def correctPointOrder(self, Rect, tolerance = 40): + # find minimum values of x and y + minX = 10e6 + minY = 10e6 + for i in range(len(Rect[0])): + if minX > Rect[i][0]: + minX = Rect[i][0] + if minY > Rect[i][1]: + minY = Rect[i][1] + #separate left and right + topLeft, bottomLeft, topRight, bottomRight = [], [], [], [] + for i in range(len(Rect[0])): + if abs(minX - Rect[0][i]) < tolerance: + if abs(minY - Rect[i][1]) < tolerance: + topLeft = [Rect[i][0], Rect[i][1]] + else: + bottomLeft = [Rect[i][0], Rect[i][1]] + else: + if abs(minY - Rect[i][1]) < tolerance: + topRight = [Rect[i][0], Rect[i][1]] + else: + bottomRight = [Rect[i][0], Rect[i][1]] + if 
len(topLeft)*len(bottomLeft)*len(topRight)*len(bottomRight) == 0: + print('Cannot find corRect corner order. Change tolerance value.') + return Rect + else: + Rect = [topLeft, bottomLeft, bottomRight, topRight] + return Rect + + def createRectangle(self, Centre, Width, Height, Angle): + tl2 = np.asarray([-Width, -Height])/2.0 + bl2 = np.asarray([-Width, Height])/2.0 + br2 = np.asarray([ Width, Height])/2.0 + tr2 = np.asarray([ Width, -Height])/2.0 + RectFit = [tl2, bl2, br2, tr2] + for i in range(len(RectFit)): + # rotate around center + xrot = RectFit[i][0]*np.cos(Angle) + RectFit[i][1]*np.sin(Angle) + yrot = -RectFit[i][0]*np.sin(Angle) + RectFit[i][1]*np.cos(Angle) + RectFit[i][0], RectFit[i][1] = (xrot+Centre[0]), (yrot+Centre[1]) + return RectFit + +def selectColorCard(Img, AspectRatio): + fig = plt.figure() + ax = fig.add_subplot(111) + ax.set_title('left click at top-left corner and right click at bottom-right corner to to select pot area') + ax.imshow(Img) + Rectangle, = ax.plot([0], [0]) # empty line/Rectangle + Rectangles = RectangleBuilder(Rectangle, AspectRatio, Img) + cursor = Cursor(ax, useblit=True, color='red', linewidth=1) + plt.show() + return Rectangles.RectList + +def getRectangleParamters(Rect): + tl = np.asarray(Rect[0]) + bl = np.asarray(Rect[1]) + br = np.asarray(Rect[2]) + tr = np.asarray(Rect[3]) + + # paramters of fitted Rectangle + Centre = (tl + bl + br + tr)/4.0 + Width = (np.linalg.norm(tr - tl) + np.linalg.norm(br - bl))/2.0 + Height = (np.linalg.norm(bl - tl) + np.linalg.norm(br - tr))/2.0 + Angle = (np.arctan2(-(tr[1] - tl[1]), tr[0] - tl[0]) + \ + np.arctan2(-(br[1] - bl[1]), br[0] - bl[0]) + \ + np.arctan2( bl[0] - tl[0] , bl[1] - tl[1]) + \ + np.arctan2( br[0] - tr[0] , br[1] - tr[1]))/4 + return Centre, Width, Height, Angle + + +def createMap(Centre, Width, Height, Angle): + MapX, MapY = np.meshgrid(np.arange(Width), np.arange(Height)) + MapX = MapX - Width/2.0 + MapY = MapY - Height/2.0 + MapX2 = MapX*np.cos(Angle) + 
MapY*np.sin(Angle) + Centre[0] + MapY2 = -MapX*np.sin(Angle) + MapY*np.cos(Angle) + Centre[1] + return MapX2.astype(np.float32), MapY2.astype(np.float32) + +# Using modified Gamma Correction Algorithm by +# Constantinou2013 - A comparison of color correction algorithms for endoscopic cameras +def getColorMatchingError(Arg, Colors, Captured_Colors): + ColorMatrix = Arg[:9].reshape([3,3]) + ColorConstant = Arg[9:12] + ColorGamma = Arg[12:15] + ErrorList = [] + for Color, Captured_Color in zip(Colors, Captured_Colors): + Color2 = np.dot(ColorMatrix, Captured_Color) + ColorConstant + Color3 = 255.0 * np.power(Color2/255.0, ColorGamma) + Error = np.linalg.norm(Color - Color3) + ErrorList.append(Error) + return ErrorList + +def correctColor(Image, ColorMatrix, ColorConstant, ColorGamma): + ImageCorrected = np.zeros_like(Image) + for i in range(Image.shape[0]): + for j in range(Image.shape[1]): + Captured_Color = Image[i,j,:].reshape([3]) + Color2 = np.dot(ColorMatrix, Captured_Color) + ColorConstant + Color3 = 255.0 * np.power(Color2/255.0, ColorGamma) + ImageCorrected[i,j,:] = np.uint8(Color3) + return ImageCorrected + +# Using modified Gamma Correction Algorithm by +# Constantinou2013 - A comparison of color correction algorithms for endoscopic cameras +def getColorMatchingErrorVectorised(Arg, Colors, Captured_Colors): + ColorMatrix = Arg[:9].reshape([3,3]) + ColorConstant = Arg[9:12].reshape([3,1]) + ColorGamma = Arg[12:15] + + TempRGB = np.dot(ColorMatrix, Captured_Colors) + ColorConstant + Corrected_Colors = np.zeros_like(TempRGB) + Corrected_Colors[0,:] = 255.0*np.power(TempRGB[0,:]/255.0, ColorGamma[0]) + Corrected_Colors[1,:] = 255.0*np.power(TempRGB[1,:]/255.0, ColorGamma[1]) + Corrected_Colors[2,:] = 255.0*np.power(TempRGB[2,:]/255.0, ColorGamma[2]) + + Diff = Colors - Corrected_Colors + ErrorList = np.sqrt(np.sum(Diff*Diff, axis= 0)).tolist() + return ErrorList + +def correctColorVectorised(Image, ColorMatrix, ColorConstant, ColorGamma): + Width, Height = 
Image.shape[1::-1] + CapturedR = Image[:,:,0].reshape([1,Width*Height]) + CapturedG = Image[:,:,1].reshape([1,Width*Height]) + CapturedB = Image[:,:,2].reshape([1,Width*Height]) + CapturedRGB = np.concatenate((CapturedR, CapturedG, CapturedB), axis=0) + + TempRGB = np.dot(ColorMatrix, CapturedRGB) + ColorConstant + CorrectedRGB = np.zeros_like(TempRGB) + CorrectedRGB[0,:] = 255.0*np.power(TempRGB[0,:]/255.0, ColorGamma[0]) + CorrectedRGB[1,:] = 255.0*np.power(TempRGB[1,:]/255.0, ColorGamma[1]) + CorrectedRGB[2,:] = 255.0*np.power(TempRGB[2,:]/255.0, ColorGamma[2]) + + CorrectedR = CorrectedRGB[0,:].reshape([Height, Width]) + CorrectedG = CorrectedRGB[1,:].reshape([Height, Width]) + CorrectedB = CorrectedRGB[2,:].reshape([Height, Width]) + ImageCorrected = np.zeros_like(Image) + ImageCorrected[:,:,0] = CorrectedR + ImageCorrected[:,:,1] = CorrectedG + ImageCorrected[:,:,2] = CorrectedB + return ImageCorrected + +def rotateImage(Image, RotationAngle = 0.0): + Image_ = Image + if RotationAngle%90.0 == 0: + k = RotationAngle//90.0 + Image_ = np.rot90(np.rot90(Image_), k) + elif RotationAngle != 0: + center=tuple(np.array(Image_.shape[0:2])/2) + rot_mat = cv2.getRotationMatrix2D(center, RotationAngle,1.0) + Image_ = cv2.warpAffine(Image_, rot_mat, Image_.shape[0:2],flags=cv2.INTER_LINEAR) + return Image_ + +def main(argv): + HelpString = 'selectPots.py -i ' + \ + '-p '+ \ + '-o \n' + \ + 'Example:\n' + \ + "$ ./estimateColorCorrection.py -i /home/chuong/Data/GC03L-temp/corrected/IMG_6425.JPG -c /home/chuong/Data/GC03L-temp/corrected/TrayConfig.yml\n" +\ + "$ ./estimateColorCorrection.py -f /home/chuong/Data/GC03L-temp/corrected/ -p IMG*JPG -c /home/chuong/Data/GC03L-temp/corrected/TrayConfig.yml -o /home/chuong/Data/GC03L-temp/rectified/" + try: + opts, args = getopt.getopt(argv,"hi:f:p:r:c:g:a:o:",\ + ["ifile=","ifolder=","ipattern=","rotation=","colorcard=","gamafile=","aspectratio=","ofolder="]) + except getopt.GetoptError: + print(HelpString) + sys.exit(2) + if 
len(opts) == 0: + print(HelpString) + sys.exit() + + ImageFile = '' + ImageFolder = '' + ImageFilePattern = '*jpg' + RotationAngle = 0.0 + OutputFolder = '' +# TrayImgWidth = None +# TrayImgHeight = None + AspectRatio = 300.0/200.0 # None + ColorCardFile = 'CameraTrax_24ColorCard_2x3in.png' + GamaFile = 'ColorGama.yml' + for opt, arg in opts: + if opt == '-h': + print(HelpString) + sys.exit() + elif opt in ("-i", "--ifile"): + ImageFile = arg + elif opt in ("-f", "--ifolder"): + ImageFolder = arg + elif opt in ("-p", "--ipattern"): + ImageFilePattern = arg + elif opt in ("-r", "--rotation"): + RotationAngle = float(arg) + elif opt in ("-c", "--colorcard"): + ColorCardFile = arg + elif opt in ("-g", "--gamafile"): + GamaFile = arg + elif opt in ("-a", "--aspectratio"): + AspectRatio = float(arg) + elif opt in ("-o", "--ofolder"): + OutputFolder = arg + + if len(OutputFolder) > 0 and not os.path.exists(OutputFolder): + os.makedirs(OutputFolder) + + P24ColorCard = cv2.imread(ColorCardFile)[:,:,::-1] + SquareSize = int(P24ColorCard.shape[0]/4) + HalfSquareSize = int(SquareSize/2) + # collect 24 colours from the captured color card: + Colors = np.zeros([3,24]) + for i in range(24): + Row = int(i/6) + Col = i - Row*6 + rr = Row*SquareSize + HalfSquareSize + cc = Col*SquareSize + HalfSquareSize + Colors[0,i] = P24ColorCard[rr,cc,0] + Colors[1,i] = P24ColorCard[rr,cc,1] + Colors[2,i] = P24ColorCard[rr,cc,2] + print('Colors = \n', Colors) + + # in casue wild cards are used + ImageFiles = sorted(glob.glob(ImageFile)) + for i,ImageFile_ in enumerate(ImageFiles): + Image = cv2.imread(ImageFile_)[:,:,::-1] + Image = rotateImage(Image, RotationAngle) + if i == 0: + RectList = selectColorCard(Image, AspectRatio) + print('Rect = \n', RectList[0]) + + dic = {'Colorbar':np.asarray(RectList[0]), 'RotationAngle':RotationAngle} + cv2yml.dic2yml(os.path.join(OutputFolder, 'ColorbarRectangle.yml'), dic) + + Centre, Width, Height, Angle = getRectangleParamters(RectList[0]) + MapX, MapY = 
createMap(Centre, Width, Height, Angle) + RectifiedColorCard = cv2.remap(Image, MapX, MapY, cv2.INTER_CUBIC) + + Captured_Colors = np.zeros([3,24]) + SquareSize2 = int(RectifiedColorCard.shape[0]/4) + HalfSquareSize2 = int(SquareSize2/2) + for i in range(24): + Row = int(i/6) + Col = i - Row*6 + rr = Row*SquareSize2 + HalfSquareSize2 + cc = Col*SquareSize2 + HalfSquareSize2 + Captured_R = RectifiedColorCard[rr-10:rr+10, cc-10:cc+10, 0].astype(np.float) + Captured_G = RectifiedColorCard[rr-10:rr+10, cc-10:cc+10, 1].astype(np.float) + Captured_B = RectifiedColorCard[rr-10:rr+10, cc-10:cc+10, 2].astype(np.float) + Captured_R = np.sum(Captured_R)/Captured_R.size + Captured_G = np.sum(Captured_G)/Captured_G.size + Captured_B = np.sum(Captured_B)/Captured_B.size + Captured_Colors[0,i] = Captured_R + Captured_Colors[1,i] = Captured_G + Captured_Colors[2,i] = Captured_B +# if Captured_R < 254 and Captured_G < 254 and Captured_B < 254: +# # only accepts unsaturated colors +# Captured_Colors.append(np.asarray([Captured_R, Captured_G, Captured_B], dtype = np.float)) +# print('Captured_Colors = \n', Captured_Colors) + + # initial values + ColorMatrix = np.eye(3) + ColorConstant = np.zeros([3,1]) + ColorGamma = np.ones([3,1]) +# print('ColorMatrix = \n', ColorMatrix) +# print('ColorConstant = \n', ColorConstant) +# print('ColorGamma = \n', ColorGamma) + Arg = np.zeros([9 + 3 + 3]) + Arg[:9] = ColorMatrix.reshape([9]) + Arg[9:12] = ColorConstant.reshape([3]) + Arg[12:15] = ColorGamma.reshape([3]) + + ArgRefined, _ = optimize.leastsq(getColorMatchingErrorVectorised, Arg, args=(Colors, Captured_Colors), maxfev=10000) + + ColorMatrix = ArgRefined[:9].reshape([3,3]) + ColorConstant = ArgRefined[9:12].reshape([3,1]) + ColorGamma = ArgRefined[12:15] + print('ColorMatrix = \n', ColorMatrix) + print('ColorConstant = \n', ColorConstant) + print('ColorGamma = \n', ColorGamma) + + ImageCorrected = correctColorVectorised(Image.astype(np.float), ColorMatrix, ColorConstant, ColorGamma) + + if 
len(OutputFolder) > 0: + OutputFile = os.path.join(OutputFolder, os.path.basename(ImageFile_)) + ImageCorrected[np.where(ImageCorrected < 0)] = 0 + ImageCorrected[np.where(ImageCorrected > 255)] = 255 + cv2.imwrite(OutputFile, np.uint8(ImageCorrected[:,:,2::-1])) + print('Saved ', OutputFile) + else: + ColorCardCorrected = correctColorVectorised(RectifiedColorCard, ColorMatrix, ColorConstant, ColorGamma) + plt.figure() + plt.imshow(RectifiedColorCard/255) + plt.title('Captured CameraTrax 24-color card') + plt.figure() + plt.imshow(ColorCardCorrected/255) + plt.title('Corrected CameraTrax 24-color card') + + plt.figure() + plt.imshow(Image) + plt.title('Captured Chamber Image') + plt.figure() + plt.imshow(ImageCorrected/255) + plt.title('Corrected Chamber Image') + + + plt.figure() + plt.imshow(P24ColorCard/255) + plt.title('Original CameraTrax 24-color card') + plt.show() + + +if __name__ == "__main__": + main(sys.argv[1:]) + + diff --git a/unwarp_rectify/estimateDistortion.py b/unwarp_rectify/estimateDistortion.py new file mode 100644 index 0000000..1f9a7f8 --- /dev/null +++ b/unwarp_rectify/estimateDistortion.py @@ -0,0 +1,186 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue May 13 11:03:53 2014 + +@author: Chuong Nguyen, chuong.nguyen@anu.edu.au +""" +from __future__ import absolute_import, division, print_function + +import numpy as np +import cv2 +import sys, getopt, os +#from multiprocessing import Pool +import datetime + + +def getTargetPhysicalPoints(GridSize, SquareSize): + ObjPoints = np.zeros( (np.prod(GridSize), 3), np.float32 ) + ObjPoints[:,:2] = np.indices(GridSize).T.reshape(-1, 2) + ObjPoints *= SquareSize + return ObjPoints + +def readNameListFromFile(FileName, StepSize = 1, Path=''): + Path = os.path.dirname(os.path.join(Path, FileName)) + with open (os.path.join(Path, FileName), 'r') as myfile: + NameList=[line.rstrip() for line in myfile] + return NameList[::StepSize], Path + +def detectTargetImagePoints(FileName, PatternType, GridSize): + 
Criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) + print(' Process ' + FileName) + # Read image and convert to grayscale + Img = cv2.imread(FileName) + Gray = cv2.cvtColor(Img, cv2.COLOR_BGR2GRAY) + + if PatternType.lower() in 'chessboard': + # detect pattern in pyramid fashion to speed up detection process + PyramidLevels = 1 + int(round(np.log(Gray.shape[1] / 1024.0) / np.log(2.0))) + for i in range(PyramidLevels): + if i == 0: + PyramidGrays = [Gray] + else: + PyramidGrays.append(cv2.pyrDown(PyramidGrays[i-1])) + + # start from the top of + ret, ImgPoints = cv2.findChessboardCorners(PyramidGrays[PyramidLevels-1], GridSize, None) + if ret == False: + return np.array([]), Img.shape[2::-1] + else: + for i in range(PyramidLevels-1, -1, -1): + if i != PyramidLevels-1: + ImgPoints = ImgPoints*2.0 + cv2.cornerSubPix(PyramidGrays[i], ImgPoints,(11,11),(-1,-1), Criteria) + +# cv2.drawChessboardCorners(PyramidGrays[i], GridSize, ImgPoints, ret) +# cv2.imshow('Detected Corners', cv2.resize(PyramidGrays[i], PyramidGrays[PyramidLevels-1].shape[::-1])) +# cv2.moveWindow('Detected Corners', 0,0) +# cv2.waitKey(2000) +# cv2.destroyAllWindows() + + return ImgPoints.reshape(-1, 2), Img.shape[1::-1] + else: + print( PatternType + ' is not currently supported') + return np.array([]), Img.shape[2::-1] + +def calibrateCamera(ObjPointsList, ImgPointsList2, ImageSize, Flags): + RMS, CameraMatrix, DistCoefs, RVecs, TVecs = \ + cv2.calibrateCamera(ObjPointsList, ImgPointsList2, ImageSize, \ + None, None, None, None, Flags) + return RMS, CameraMatrix, DistCoefs, RVecs, TVecs + + +def saveCalibrationData(CalibFileName, SquareSize, ImageSize, CameraMatrix, DistCoefs, RVecs, TVecs, RMS): + with open (CalibFileName, 'w') as myfile: + print(' Write to ' + CalibFileName) + myfile.write('%YAML:1.0\n') + myfile.write('calibration_time: "' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + '"\n') + myfile.write('calibration_RMS: %f\n' %RMS) + myfile.write('# 
physical square size [mm] or distance between control points\n') + myfile.write('# if it equals 1, the physical size is not provided\n') + myfile.write('square_size: %f\n' %SquareSize) + myfile.write('image_width: %d\n' %ImageSize[0]) + myfile.write('image_height: %d\n' %ImageSize[1]) + myfile.write('camera_matrix: !!opencv-matrix\n') + myfile.write(' rows: 3\n') + myfile.write(' cols: 3\n') + myfile.write(' dt: d\n') + myfile.write(' data: [ %f, %f, %f, %f, %f, %f, %f, %f, %f]\n' \ + %(CameraMatrix[0,0], CameraMatrix[0,1], CameraMatrix[0,2], \ + CameraMatrix[1,0], CameraMatrix[1,1], CameraMatrix[1,2], \ + CameraMatrix[2,0], CameraMatrix[2,1], CameraMatrix[2,2])) + myfile.write('distortion_coefficients: !!opencv-matrix\n') + myfile.write(' rows: 5\n') + myfile.write(' cols: 1\n') + myfile.write(' dt: d\n') + myfile.write(' data: [ %f, %f, %f, %f, %f]\n' \ + %(DistCoefs[0][0], DistCoefs[0][1], DistCoefs[0][2], DistCoefs[0][3], DistCoefs[0][4])) + myfile.write('# rotation vectors of the camera\n') + myfile.write('RVecs: !!opencv-matrix\n') + myfile.write(' rows: %d\n' %len(RVecs)) + myfile.write(' cols: 3\n') + myfile.write(' dt: d\n') + myfile.write(' data: [ ') + datalist = [] + for RVec in RVecs: + datalist = datalist + ['%f' %RVec[0], '%f' %RVec[1], '%f' %RVec[2]] + myfile.write(', '.join(datalist)) + myfile.write(' ]\n') + + myfile.write('# translation vectors of the camera\n') + myfile.write('TVecs: !!opencv-matrix\n') + myfile.write(' rows: %d\n' %len(TVecs)) + myfile.write(' cols: 3\n') + myfile.write(' dt: d\n') + myfile.write(' data: [ ') + datalist = [] + for TVec in TVecs: + datalist = datalist + ['%f' %TVec[0], '%f' %TVec[1], '%f' %TVec[2]] + myfile.write(', '.join(datalist)) + myfile.write(' ]\n') + +def main(argv): + HelpString = 'estimateDistortion.py -i ' + \ + '-W -H -S ' + \ + '-o ' + \ + 'Example:\n' + \ + "$ ./estimateDistortion.py -i /home/chuong/Data/GC03L-temp/image_list.txt -o /home/chuong/Data/GC03L-temp/IMG_6425/calib_param.yml" + try: + 
opts, args = getopt.getopt(argv,"hi:W:H:S:o:",["ifile=","gwidth=","gheight=","ssize=","ofolder="]) + except getopt.GetoptError: + print(HelpString) + sys.exit(2) + if len(opts) == 0: + print(HelpString) + sys.exit() + + + InputListFile = '' + OutputFile = '' + GridWidth = 12 + GridHeight = 12 + SquareSize = 40.0 #mm + PatternType = 'chessboard' +# Flags = cv2.CALIB_FIX_ASPECT_RATIO + cv2.CALIB_ZERO_TANGENT_DIST + Flags = cv2.CALIB_FIX_PRINCIPAL_POINT + \ + cv2.CALIB_FIX_ASPECT_RATIO #+ \ +# cv2.CALIB_ZERO_TANGENT_DIST #+ \ +# cv2.CALIB_FIX_K3 + for opt, arg in opts: + if opt == '-h': + print(HelpString) + sys.exit() + elif opt in ("-i", "--ifile"): + InputListFile = arg + elif opt in ("-W", "--gwidth"): + GridWidth = int(arg) + elif opt in ("-H", "--gheight"): + GridHeight = int(arg) + elif opt in ("-S", "--ssize"): + SquareSize = float(arg) + elif opt in ("-o", "--ofile"): + OutputFile = arg + + GridSize = (GridWidth, GridHeight) + ObjPoints = getTargetPhysicalPoints(GridSize, SquareSize) + + NameList, Path = readNameListFromFile(InputListFile) + NameList = filter(None, NameList) + + ImgPointsList = [] + ObjPointsList = [] + for FileName in NameList: + ImgPoints, ImageSize = detectTargetImagePoints(os.path.join(Path, FileName), PatternType, GridSize) + if len(ImgPoints) == 0: + print('Cannot detect pattern from', FileName) + continue + ImgPointsList.append(ImgPoints) + ObjPointsList.append(ObjPoints) + + RMS, CameraMatrix, DistCoefs, RVecs, TVecs = \ + calibrateCamera(ObjPointsList, ImgPointsList, ImageSize, Flags) + print('Calibration RMS = %f' %RMS) + + saveCalibrationData(OutputFile, SquareSize, ImageSize, CameraMatrix, DistCoefs, RVecs, TVecs, RMS) + +if __name__ == "__main__": + main(sys.argv[1:]) \ No newline at end of file diff --git a/unwarp_rectify/pipeline.py b/unwarp_rectify/pipeline.py new file mode 100644 index 0000000..7aac72b --- /dev/null +++ b/unwarp_rectify/pipeline.py @@ -0,0 +1,408 @@ +# -*- coding: utf-8 -*- +""" +Created on Mon Jun 23 
11:14:47 2014 + +@authors: initial skeleton by Joel Granados, updated by Chuong Nguyen +""" +from __future__ import absolute_import, division, print_function + + +import yaml +import numpy as np +from timestream.parse import ts_iter_images +import cv2 +import utils +import matplotlib.pyplot as plt +import os + +class PipeComponent ( object ): + actName = "None" + argNames = None + + runExpects = None + runReturns = None + + def __init__(self, *args, **kwargs): + raise NotImplementedError() + + def run(self, ts): + raise NotImplementedError() + + +class ImageUndistorter ( PipeComponent ): + actName = "undistort" + argNames = {"mess": "Apply lens distortion correction"} + + runExpects = np.ndarray + runReturns = np.ndarray + + def __init__(self, **kwargs): + try: + self.mess = kwargs["mess"] + self.cameraMatrix = np.asarray(kwargs["cameraMatrix"]) + self.distortCoefs = np.asarray(kwargs["distortCoefs"]) + self.imageSize = tuple(kwargs["imageSize"]) + self.rotationAngle = kwargs["rotationAngle"] + self.UndistMapX, self.UndistMapY = cv2.initUndistortRectifyMap( \ + self.cameraMatrix, self.distortCoefs, None, self.cameraMatrix, \ + self.imageSize, cv2.CV_32FC1) + except KeyError: + self.mess = "Unable to read all parameters for " + ImageUndistorter.actName + self.cameraMatrix = None + self.distortCoefs = None + self.imageSize = None + self.rotationAngle = None + self.UndistMapX, self.UndistMapY = None, None + + def run(self, image): + print(self.mess) + self.image = utils.rotateImage(image, self.rotationAngle) + if self.UndistMapX != None and self.UndistMapY != None: + self.imageUndistorted = cv2.remap(self.image.astype(np.uint8), \ + self.UndistMapX, self.UndistMapY, cv2.INTER_CUBIC) + return(self.imageUndistorted) + + def show(self): + plt.figure() + plt.imshow(self.image) + plt.title('Original image') + plt.figure() + plt.imshow(self.imageUndistorted) + plt.title('Undistorted image') + plt.show() + + +class ColorCardDetector ( PipeComponent ): + actName = 
"colorcarddetect" + argNames = {"mess": "Detect color card position and color info"} + + runExpects = np.ndarray + runReturns = [np.ndarray, list] + + def __init__(self, **kwargs): + try: + self.mess = kwargs["mess"] + self.colorcardTrueColors = kwargs["colorcardTrueColors"] + self.colorcardFile = kwargs["colorcardFile"] + self.colorcardPosition = kwargs["colorcardPosition"] + self.settingPath = kwargs["settingPath"] + except KeyError: + self.mess = "Unable to read parameters for " + ColorCardDetector.actName + self.colorcardTrueColors = None + self.colorcardFile = None + self.colorcardPosition = None + self.settingPath = None + + def run(self, image): + print(self.mess) + self.colorcardImage = cv2.imread(os.path.join(self.settingPath, self.colorcardFile))[:,:,::-1] + + # create image pyramid for multiscale matching + self.colorcardPyramid = utils.createImagePyramid(self.colorcardImage) + self.imagePyramid = utils.createImagePyramid(image) + SearchRange = [self.colorcardPyramid[0].shape[1], self.colorcardPyramid[0].shape[0]] + score, loc, angle = utils.matchTemplatePyramid(self.imagePyramid, self.colorcardPyramid, \ + 0, EstimatedLocation = self.colorcardPosition, SearchRange = SearchRange) + if score > 0.3: + # extract color information + self.foundCard = image[loc[1]-self.colorcardImage.shape[0]//2:loc[1]+self.colorcardImage.shape[0]//2, \ + loc[0]-self.colorcardImage.shape[1]//2:loc[0]+self.colorcardImage.shape[1]//2] + self.colorcardColors, _ = utils.getColorcardColors(self.foundCard, GridSize = [6, 4]) + self.colorcardParams = utils.estimateColorParameters(self.colorcardTrueColors, self.colorcardColors) + # for displaying + self.loc = loc + self.image = image + else: + print('Cannot find color card') + self.colorcardParams = [None, None, None] + + return(image, self.colorcardParams) + + def show(self): + plt.figure() + plt.imshow(self.image) + plt.hold(True) + plt.plot([self.loc[0]], [self.loc[1]], 'ys') + plt.text(self.loc[0]-30, self.loc[1]-15, 'ColorCard', 
color='yellow') + plt.title('Detected color card') + plt.figure() + plt.imshow(self.foundCard) + plt.title('Detected color card') + plt.show() + + +class ImageColorCorrector ( PipeComponent ): + actName = "colorcorrect" + argNames = {"mess": "Correct image color"} + + runExpects = [np.ndarray, list] + runReturns = np.ndarray + + def __init__(self, **kwargs): + try: + self.mess = kwargs["mess"] + except KeyError: + self.mess = "Unable to read parameters for " + ImageColorCorrector.actName + + def run(self, inputs): + print(self.mess) + image, colorcardParam = inputs + colorMatrix, colorConstant, colorGamma = colorcardParam + if colorMatrix != None: + self.imageCorrected = utils.correctColorVectorised(image.astype(np.float), colorMatrix, colorConstant, colorGamma) + self.imageCorrected[np.where(self.imageCorrected < 0)] = 0 + self.imageCorrected[np.where(self.imageCorrected > 255)] = 255 + self.imageCorrected = self.imageCorrected.astype(np.uint8) + self.image = image # for displaying + else: + print('Skip color correction') + self.imageCorrected = image + + return(self.imageCorrected) + + def show(self): + plt.figure() + plt.imshow(self.image) + plt.title('Image without color correction') + plt.figure() + plt.imshow(self.imageCorrected) + plt.title('Color-corrected image') + plt.show() + + +class TrayDetector ( PipeComponent ): + actName = "traydetect" + argNames = {"mess": "Detect tray positions"} + + runExpects = np.ndarray + runReturns = [np.ndarray, list, list] + + def __init__(self, **kwargs): + try: + self.mess = kwargs["mess"] + self.trayFiles = kwargs["trayFiles"] + self.trayNumber = kwargs["trayNumber"] + self.trayPositions = kwargs["trayPositions"] + self.settingPath = kwargs["settingPath"] + except KeyError: + self.mess = "Unable to read parameters for " + TrayDetector.actName + self.trayFiles = None + self.trayNumber = None + self.trayPositions = None + + def run(self, image): + print(self.mess) + self.image = image + temp = np.zeros_like(self.image) + 
temp[:,:,:] = image[:,:,:] + temp[:,:,1] = 0 # suppress green channel + self.imagePyramid = utils.createImagePyramid(temp) + self.trayPyramids = [] + for i in range(self.trayNumber): + trayImage = cv2.imread(os.path.join(self.settingPath, self.trayFiles % i))[:,:,::-1] + trayImage[:,:,1] = 0 # suppress green channel + trayPyramid = utils.createImagePyramid(trayImage) + self.trayPyramids.append(trayPyramid) + + self.trayLocs = [] + for i,trayPyramid in enumerate(self.trayPyramids): + SearchRange = [trayPyramid[0].shape[1]//6, trayPyramid[0].shape[0]//6] + score, loc, angle = utils.matchTemplatePyramid(self.imagePyramid, trayPyramid, \ + RotationAngle = 0, EstimatedLocation = self.trayPositions[i], SearchRange = SearchRange) + if score < 0.3: + print('Low tray matching score. Likely tray %d is missing.' %i) + self.trayLocs.append(None) + continue + self.trayLocs.append(loc) + + return(self.image, self.imagePyramid, self.trayLocs) + + def show(self): + plt.figure() + plt.imshow(self.image.astype(np.uint8)) + plt.hold(True) + PotIndex = 0 + for i,Loc in enumerate(self.trayLocs): + if Loc == None: + continue + plt.plot([Loc[0]], [Loc[1]], 'bo') + PotIndex = PotIndex + 1 + plt.title('Detected trays') + plt.show() + + +class PotDetector ( PipeComponent ): + actName = "potdetect" + argNames = {"mess": "Detect pot position"} + + runExpects = [np.ndarray, list, list] + runReturns = [np.ndarray, list] + + def __init__(self, **kwargs): + try: + self.mess = kwargs["mess"] + self.potFile = kwargs["potFile"] + self.potTemplateFile = kwargs["potTemplateFile"] + self.potPosition = kwargs["potPosition"] + self.potSize = kwargs["potSize"] + self.traySize = kwargs["traySize"] + self.settingPath = kwargs["settingPath"] + except KeyError: + self.mess = "Unable to read parameters for " + PotDetector.actName + self.potFile = None + self.potPosition = None + self.potSize = None + + def run(self, inputs): + print(self.mess) + self.image, self.imagePyramid, self.trayLocs = inputs + # read 
pot template image and scale to the pot size + potImage = cv2.imread(os.path.join(self.settingPath, self.potFile))[:,:,::-1] + potTemplateImage = cv2.imread(os.path.join(self.settingPath, self.potTemplateFile))[:,:,::-1] + potTemplateImage[:,:,1] = 0 # suppress green channel + potTemplateImage = cv2.resize(potTemplateImage.astype(np.uint8), (potImage.shape[1], potImage.shape[0])) + self.potPyramid = utils.createImagePyramid(potTemplateImage) + + XSteps = self.traySize[0]//self.potSize[0] + YSteps = self.traySize[1]//self.potSize[1] + StepX = self.traySize[0]//XSteps + StepY = self.traySize[1]//YSteps + + self.potLocs2 = [] + self.potLocs2_ = [] + for trayLoc in self.trayLocs: + StartX = trayLoc[0] - self.traySize[0]//2 + StepX//2 + StartY = trayLoc[1] + self.traySize[1]//2 - StepY//2 + SearchRange = [self.potPyramid[0].shape[1]//4, self.potPyramid[0].shape[0]//4] +# SearchRange = [32, 32] + print('SearchRange=', SearchRange) + potLocs = [] + potLocs_ = [] + for k in range(4): + for l in range(5): + estimateLoc = [StartX + StepX*k, StartY - StepY*l] + score, loc,angle = utils.matchTemplatePyramid(self.imagePyramid, \ + self.potPyramid, RotationAngle = 0, \ + EstimatedLocation = estimateLoc, NoLevels = 3, SearchRange = SearchRange) + potLocs.append(loc) + potLocs_.append(estimateLoc) + self.potLocs2.append(potLocs) + self.potLocs2_.append(potLocs_) + + return(self.image, self.potLocs2) + + def show(self): + plt.figure() + plt.imshow(self.image.astype(np.uint8)) + plt.hold(True) + PotIndex = 0 + for i,Loc in enumerate(self.trayLocs): + if Loc == None: + continue + plt.plot([Loc[0]], [Loc[1]], 'bo') + plt.text(Loc[0], Loc[1]-15, 'T'+str(i+1), color='blue', fontsize=20) + for PotLoc,PotLoc_ in zip(self.potLocs2[i], self.potLocs2_[i]): + plt.plot([PotLoc[0]], [PotLoc[1]], 'ro') + plt.text(PotLoc[0], PotLoc[1]-15, str(PotIndex+1), color='red') + plt.plot([PotLoc_[0]], [PotLoc_[1]], 'rx') + PotIndex = PotIndex + 1 + plt.title('Detected trays and pots') + plt.show() + + 
+class PlantExtractor ( PipeComponent ): + actName = "plantextract" + argNames = {"mess": "Extract plant biometrics"} + + runExpects = [np.ndarray, list] + runReturns = [np.ndarray, list] + + def __init__(self, **kwargs): + try: + self.mess = kwargs["mess"] + except KeyError: + self.mess = "Unable to read parameters for " + PlantExtractor.actName + + def run(self, inputs): + print(self.mess) + image, potPositions = inputs + print("Image size =", image.shape) + plantBiometrics = [] + return(image, plantBiometrics) + + def show(self): + pass + +class ImagePipeline ( object ): + complist = { ImageUndistorter.actName: ImageUndistorter, + ColorCardDetector.actName: ColorCardDetector, \ + ImageColorCorrector.actName: ImageColorCorrector, \ + TrayDetector.actName: TrayDetector, \ + PotDetector.actName: PotDetector, \ + PlantExtractor.actName: PlantExtractor \ + } + + def __init__(self, defFilePath): + f = file(defFilePath) + yamlStruct = yaml.load(f) + f.close() + + self.pipeline = [] + + # First elements needs to expect ndarray + elem = yamlStruct.pop(0) + if ( ImagePipeline.complist[elem[0]].runExpects is not np.ndarray ): + raise ValueError("First pipe element should expect ndarray") + self.pipeline.append( ImagePipeline.complist[elem[0]](**elem[1]) ) + + # Add elements checking for dependencies + for elem in yamlStruct: + elem[1]['settingPath'] = os.path.dirname(defFilePath) + if ( ImagePipeline.complist[elem[0]].runExpects != + self.pipeline[-1].__class__.runReturns ): + raise ValueError("Dependancy issue in pipeline") + + self.pipeline.append( ImagePipeline.complist[elem[0]](**elem[1]) ) + + def process(self, ts): + image = ts.getCurrentImage() + + # First elem with inpupt image + res = image + # Rest elems with previous results + for elem in self.pipeline: + res = elem.run(res) + elem.show() + + return (res) + + def printCompList(self): + print(ImagePipeline.complist) + + +class DummyTS(object): + def __init__(self, rootPath): + self.img_iter = 
ts_iter_images(rootPath) + self.counter = -1 + + def getCurrentImage(self): + for i in range(750): + self.img_iter.next() + self.currentImage = cv2.imread(self.img_iter.next())[:,:,::-1] + self.counter = self.counter + 1 + return self.currentImage + + def getFullImageList(self): + # go to the start + self.img_iter.seek(0) + imageList = list(self.img_iter) + # go back to current position + self.img_iter.seek(self.counter) + return imageList + +if __name__ == "__main__": + + ts = DummyTS('/mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0036/BVZ0036-GC02L~fullres-orig/') + ip = ImagePipeline("/home/chuong/Workspace/traitcapture-bin/unwarp_rectify/data/pipeline.yml") + + ip.process(ts) \ No newline at end of file diff --git a/unwarp_rectify/selectColorbarTrayPot.py b/unwarp_rectify/selectColorbarTrayPot.py new file mode 100644 index 0000000..e4d7449 --- /dev/null +++ b/unwarp_rectify/selectColorbarTrayPot.py @@ -0,0 +1,501 @@ +""" +Created on Mon Jun 16 2014 + +@author: Chuong Nguyen, chuongnguyen@anu.edu.au + +""" +from __future__ import absolute_import, division, print_function + +import sys +from PyQt4 import QtGui, QtCore + +from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas +from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar +import matplotlib.pyplot as plt +from matplotlib.widgets import Cursor + +import os +import cv2 +import numpy as np +import utils +import cv2yml +import yaml + +class Window(QtGui.QDialog): + def __init__(self, parent=None): + super(Window, self).__init__(parent) + + self.figure = plt.figure() + self.canvas = FigureCanvas(self.figure) + self.canvas.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding) + + self.toolbar = NavigationToolbar(self.canvas, self) + self.toolbar.hide() + + # Just some button + self.colorcardRadioButton = QtGui.QRadioButton('Select color car&d') + self.colorcardRadioButton.setChecked(False) + 
self.colorcardRadioButton.clicked.connect(self.selectWhat) + + self.trayRadioButton = QtGui.QRadioButton('Select &tray') + self.trayRadioButton.setChecked(False) + self.trayRadioButton.clicked.connect(self.selectWhat) + + self.potRadioButton = QtGui.QRadioButton('Select &pot') + self.potRadioButton.setChecked(False) + self.potRadioButton.clicked.connect(self.selectWhat) + + self.loadImageButton = QtGui.QPushButton('&Load image') + self.loadImageButton.clicked.connect(self.loadImage) + + self.rotateImageButton = QtGui.QPushButton('&Rotate 90-deg') + self.rotateImageButton.clicked.connect(self.rotateImage90Degrees) + + self.loadCamCalibButton = QtGui.QPushButton('Load &cam. param.') + self.loadCamCalibButton.clicked.connect(self.loadCamCalib) + + self.saveGeometriesButton = QtGui.QPushButton('&Save selected geometries') + self.saveGeometriesButton.clicked.connect(self.saveSelectedGeometries) + + self.saveTraysButton = QtGui.QPushButton('&Save selected tray images') + self.saveTraysButton.clicked.connect(self.saveSelectedTrayImages) + + self.saveColorcadButton = QtGui.QPushButton('&Save sel. col. 
card images') + self.saveColorcadButton.clicked.connect(self.saveSelectedColorcardImages) + + self.save2PipelineButton = QtGui.QPushButton('&Save as pipeline settings') + self.save2PipelineButton.clicked.connect(self.savePipelineSettings) + + self.zoomButton = QtGui.QPushButton('&Zoom') + self.zoomButton.setCheckable(True) + self.zoomButton.clicked.connect(self.zoom) + + self.panButton = QtGui.QPushButton('&Pan') + self.panButton.setCheckable(True) + self.panButton.clicked.connect(self.pan) + + self.homeButton = QtGui.QPushButton('&Home') + self.homeButton.clicked.connect(self.home) + + self.status = QtGui.QTextEdit('') + self.status.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding) + self.mousePosition = QtGui.QLabel('') + + # set the layout + layout = QtGui.QHBoxLayout() + rightWidget = QtGui.QWidget() + buttonlayout = QtGui.QVBoxLayout(rightWidget) + buttonlayout.addWidget(self.loadImageButton) + buttonlayout.addWidget(self.rotateImageButton) + buttonlayout.addWidget(self.loadCamCalibButton) + buttonlayout.addWidget(self.colorcardRadioButton) + buttonlayout.addWidget(self.trayRadioButton) + buttonlayout.addWidget(self.potRadioButton) + buttonlayout.addWidget(self.zoomButton) + buttonlayout.addWidget(self.panButton) + buttonlayout.addWidget(self.homeButton) + buttonlayout.addWidget(self.saveGeometriesButton) + buttonlayout.addWidget(self.saveColorcadButton) + buttonlayout.addWidget(self.saveTraysButton) + buttonlayout.addWidget(self.save2PipelineButton) + buttonlayout.addWidget(self.status) + buttonlayout.addWidget(self.mousePosition) + rightWidget.setMaximumWidth(200) + leftLayout = QtGui.QVBoxLayout() + leftLayout.addWidget(self.toolbar) + leftLayout.addWidget(self.canvas) + + layout.addWidget(rightWidget) + layout.addLayout(leftLayout) + self.setLayout(layout) + + self.group = QtGui.QButtonGroup() + self.group.addButton(self.colorcardRadioButton) + self.group.addButton(self.trayRadioButton) + self.group.addButton(self.potRadioButton) + + 
self.panMode = False + self.zoomMode = False + + self.ax = None + self.plotRect = None + self.plotImg = None + self.image = None + self.potTemplate = None + self.UndistMapX = None + self.UndistMapY = None + self.trayAspectRatio = 0.835 + self.colorcardAspectRatio = 1.5 + self.potAspectRatio = 1.0 + self.leftClicks = [] + + self.ImageSize = None + self.CameraMatrix = None + self.DistCoefs = None + +# # change cursor shape +# self.setCursor(QtGui.QCursor(QtCore.Qt.CrossCursor )) + + # Ouput parameters + self.colorcardList = [] + self.trayList = [] + self.potList = [] + self.rotationAngle = 0 + self.isDistortionCorrected = False + + def selectWhat(self): + if self.trayRadioButton.isChecked(): + self.status.append('Start selecting tray.') + elif self.colorcardRadioButton.isChecked(): + self.status.append('Start selecting color bar.') + else: + self.status.append('Start selecting pot.') + + def home(self): + self.toolbar.home() + def zoom(self): + self.toolbar.zoom() + if not self.zoomMode: + self.zoomMode = True + self.panMode = False + self.panButton.setChecked(False) + else: + self.zoomMode = False + def pan(self): + self.toolbar.pan() + if not self.panMode: + self.panMode = True + self.zoomMode = False + self.zoomButton.setChecked(False) + else: + self.panMode = False + + def loadImage(self): + ''' load and show an image''' + fname = QtGui.QFileDialog.getOpenFileName(self, 'Open image', '/mnt/phenocam/a_data/TimeStreams/BorevitzTest/BVZ0036/BVZ0036-GC02L~fullres-orig/2014/2014_06/2014_06_20/2014_06_20_12') + self.status.append('Loading image...') + app.processEvents() + self.image = cv2.imread(str(fname))[:,:,::-1] + self.status.append('Loaded image from ' + str(fname)) + + # reset all outputs + self.colorcardList = [] + self.trayList = [] + self.potList = [] + self.rotationAngle = 0 + self.isDistortionCorrected = False + + # Undistort image if mapping available + if not self.isDistortionCorrected and self.UndistMapX != None and self.UndistMapY != None: + self.image 
= cv2.remap(self.image.astype(np.uint8), self.UndistMapX, self.UndistMapY, cv2.INTER_CUBIC) + self.isDistortionCorrected = True + + if self.image != None: + if self.ax == None: + self.ax = self.figure.add_subplot(111) + self.ax.figure.canvas.mpl_connect('button_press_event', self.onMouseClicked) + self.ax.figure.canvas.mpl_connect('motion_notify_event', self.onMouseMoves) + self.ax.figure.canvas.mpl_connect('figure_enter_event', self.changeCursor) + self.ax.hold(False) + if self.plotImg == None: + self.plotImg = self.ax.imshow(self.image) + else: + self.plotImg.set_data(self.image) + self.figure.tight_layout() + self.canvas.draw() + + def changeCursor(self, event): +# cursor = Cursor(self.ax, useblit=True, color='red', linewidth=1) + self.canvas.setCursor(QtGui.QCursor(QtCore.Qt.CrossCursor )) + + def updateFigure(self): + xs, ys = [], [] + for Rect in self.colorcardList: + tl, bl, br, tr = Rect + xs = xs + [tl[0], bl[0], br[0], tr[0], tl[0], np.nan] + ys = ys + [tl[1], bl[1], br[1], tr[1], tl[1], np.nan] + for Rect in self.trayList: + tl, bl, br, tr = Rect + xs = xs + [tl[0], bl[0], br[0], tr[0], tl[0], np.nan] + ys = ys + [tl[1], bl[1], br[1], tr[1], tl[1], np.nan] + for Rect in self.potList: + tl, bl, br, tr = Rect + xs = xs + [tl[0], bl[0], br[0], tr[0], tl[0], np.nan] + ys = ys + [tl[1], bl[1], br[1], tr[1], tl[1], np.nan] + for x,y in self.leftClicks: + xs = xs + [x] + ys = ys + [y] +# if self.crosshair != None: +# xs = xs + [np.nan, 0, self.image.shape[1], np.nan, self.crosshair[0], self.crosshair[0], np.nan] +# ys = ys + [np.nan, self.crosshair[1], self.crosshair[1], np.nan, 0, self.image.shape[0], np.nan] + if len(xs) > 0 and len(ys) > 0: + if self.plotRect == None: + self.ax.hold(True) + self.plotRect, = self.ax.plot(xs, ys, 'b') + self.ax.hold(False) + self.ax.set_xlim([0,self.image.shape[1]]) + self.ax.set_ylim([0,self.image.shape[0]]) + self.ax.invert_yaxis() + else: + self.plotRect.set_data(xs, ys) + self.canvas.draw() + + app.processEvents() + + + 
def loadCamCalib(self): + ''' load camera calibration image and show an image''' + CalibFile = QtGui.QFileDialog.getOpenFileName(self, 'Open image', '/home/chuong/Workspace/traitcapture-bin/unwarp_rectify/data') + self.ImageSize, SquareSize, self.CameraMatrix, self.DistCoefs, RVecs, TVecs = utils.readCalibration(CalibFile) + self.status.append('Loaded camera parameters from ' + CalibFile) + print('CameraMatrix =', self.CameraMatrix) + print('DistCoefs =', self.DistCoefs) + self.UndistMapX, self.UndistMapY = cv2.initUndistortRectifyMap(self.CameraMatrix, self.DistCoefs, \ + None, self.CameraMatrix, self.ImageSize, cv2.CV_32FC1) + + if self.image != None: + self.image = cv2.remap(self.image.astype(np.uint8), self.UndistMapX, self.UndistMapY, cv2.INTER_CUBIC) + self.isDistortionCorrected = True + self.status.append('Corrected image distortion.') + if self.plotImg == None: + self.plotImg = self.ax.imshow(self.image) + else: + self.plotImg.set_data(self.image) + self.canvas.draw() + +# def loadPotTemplate(self): +# ''' load pot template image''' +# fname = QtGui.QFileDialog.getOpenFileName(self, 'Open image', '/home/chuong/Workspace/traitcapture-bin/unwarp_rectify/data') +# self.status.append('Loading image...') +# app.processEvents() +# self.potTemplate = cv2.imread(str(fname))[:,:,::-1] +# if len(self.potList) > 0: + + + def saveSelectedGeometries(self): + ''' save selected geometries''' + fname = QtGui.QFileDialog.getSaveFileName(self, 'Save selected geometries', '/home/chuong/Workspace/traitcapture-bin/unwarp_rectify/data') + colorcardList2 = [] + for colorcard in self.colorcardList: + colorcardList2 = colorcardList2 + colorcard + trayList2 = [] + for tray in self.trayList: + trayList2 = trayList2 + tray + potList2 = [] + for pot in self.potList: + potList2 = potList2 + pot + dicdata = {'colorcardself.crosshair = NoneList':np.asarray(colorcardList2), \ + 'trayList':np.asarray(trayList2), \ + 'potList':np.asarray(potList2), \ + 'rotationAngle':self.rotationAngle, \ + 
'distortionCorrected':int(self.isDistortionCorrected)} + cv2yml.dic2yml(fname, dicdata) + self.status.append('Saved selected geometries to ' + fname) + + def saveSelectedTrayImages(self): + ''' save selected trays''' + fname = QtGui.QFileDialog.getSaveFileName(self, 'Save selected tray images', '/home/chuong/Workspace/traitcapture-bin/unwarp_rectify/data') + medianWidth, medianHeight = utils.getMedianRectSize(self.trayList) + rectifiedTrayImages = utils.rectifyRectImages(self.image, self.trayList, MedianSize = [medianWidth, medianHeight]) + for i,rectifiedImage in enumerate(rectifiedTrayImages): + cv2.imwrite(str(fname) %i, rectifiedImage) + + def saveSelectedColorcardImages(self): + ''' save selected trays''' + fname = QtGui.QFileDialog.getSaveFileName(self, 'Save selected color card images', '/home/chuong/Workspace/traitcapture-bin/unwarp_rectify/data') + medianWidth, medianHeight = utils.getMedianRectSize(self.colorcardList) + rectifiedColorcardImages = utils.rectifyRectImages(self.image, self.colorcardList, MedianSize = [medianWidth, medianHeight]) + for i,rectifiedImage in enumerate(rectifiedColorcardImages): + cv2.imwrite(str(fname) %i, rectifiedImage) + + def savePipelineSettings(self): + ''' save to pipeline setting file''' + fname = QtGui.QFileDialog.getSaveFileName(self, 'Save selection to pipeline settings', '/home/chuong/Workspace/traitcapture-bin/unwarp_rectify/data') + settingPath = os.path.dirname(str(fname)) + settings = [] + if self.ImageSize != None and self.CameraMatrix != None and self.DistCoefs != None: + undistort = ['undistort', \ + {'mess': 'perform optical undistortion', \ + 'cameraMatrix': self.CameraMatrix.tolist(), \ + 'distortCoefs': self.DistCoefs.tolist(), \ + 'imageSize': list(self.ImageSize), + 'rotationAngle': self.rotationAngle + } \ + ] + else: + undistort = ['undistort', {'mess': '---skip optical undistortion---'}] + settings.append(undistort) + + if len(self.colorcardList) > 0: + medianSize = 
utils.getMedianRectSize(self.colorcardList) + capturedColorcards = utils.rectifyRectImages(self.image, self.colorcardList, medianSize) + colorCardFile = 'CapturedColorcard.png' + cv2.imwrite(os.path.join(settingPath, colorCardFile), capturedColorcards[0][:,:,::-1].astype(np.uint8)) + colorcardColors, colorStd = utils.getColorcardColors(capturedColorcards[0], GridSize = [6,4]) + colorcardPosition, Width, Height, Angle = utils.getRectangleParamters(self.colorcardList[0]) + colorcarddetect = ['colorcarddetect', \ + {'mess': '---perform color card detection---', \ + 'colorcardFile': colorCardFile,\ + 'colorcardPosition': colorcardPosition.tolist(),\ + 'colorcardTrueColors': utils.CameraTrax_24ColorCard + } + ] + else: + colorcarddetect = ['colorcarddetect', {'mess': '---skip color card detection---'}] + settings.append(colorcarddetect) + + colorcorrect = ['colorcorrect', {'mess': '---perform color correction---'}] + settings.append(colorcorrect) + + if len(self.trayList) > 0: + trayMedianSize = utils.getMedianRectSize(self.trayList) + trayImages = utils.rectifyRectImages(self.image, self.trayList, trayMedianSize) + colorcardColors, colorStd = utils.getColorcardColors(capturedColorcards[0], GridSize = [6,4]) + trayDict = {'mess': '---perform tray detection---'} + trayDict['trayNumber'] = len(self.trayList) + trayDict['trayFiles'] = 'Tray_%02d.png' + trayPositions = [] + for i,tray in enumerate(trayImages): + cv2.imwrite(os.path.join(settingPath, trayDict['trayFiles'] %i), tray[:,:,::-1].astype(np.uint8)) + trayPosition, Width, Height, Angle = utils.getRectangleParamters(self.trayList[i]) + trayPositions.append(trayPosition.tolist()) + trayDict['trayPositions'] = trayPositions + traydetect = ['traydetect', trayDict] + else: + traydetect = ['traydetect', {'mess': '---skip tray detection---'}] + settings.append(traydetect) + + if len(self.potList) > 0: + trayMedianSize = utils.getMedianRectSize(self.trayList) + potPosition, Width, Height, Angle = 
utils.getRectangleParamters(self.potList[0]) + Width, Height = int(Width), int(Height) + topLeft = [int(self.potList[0][0][0]), int(self.potList[0][0][1])] + self.potImage = self.image[topLeft[1]:topLeft[1]+Height, topLeft[0]:topLeft[0]+Width, :] + potFile = 'Pot.png' + cv2.imwrite(os.path.join(settingPath, potFile), self.potImage[:,:,::-1].astype(np.uint8)) + potDict = {'mess': '---perform pot detection---'} + potDict['potPosition'] = potPosition.tolist() + potDict['potSize'] = [int(Width), int(Height)] + potDict['traySize'] = [int(trayMedianSize[0]), int(trayMedianSize[1])] + potDict['potFile'] = potFile + if self.potTemplate != None: + potTemplateFile = 'potTemplate.png' + cv2.imwrite(os.path.join(settingPath, potTemplateFile), self.potTemplate[:,:,::-1]) + potDict['potTemplateFile'] = potTemplateFile + potdetect = ['potdetect', potDict] + else: + potdetect = ['potdetect', {'mess': '---skip pot detection---'}] + settings.append(potdetect) + + plantextract = ['plantextract', {'mess': '---perfrom plant biometrics extraction---'}] + settings.append(plantextract) + + with open(fname, 'w') as outfile: + outfile.write( yaml.dump(settings, default_flow_style=None) ) + + + def rotateImage90Degrees(self): + if self.image == None: + self.status.append('No image to rotate.') + return + self.rotationAngle = self.rotationAngle + 90 + if self.rotationAngle >= 360: + self.rotationAngle = self.rotationAngle - 360 + self.image = np.rot90(self.image) #.astype(uint8) + self.status.append('Rot. 
angle = %d deg' %self.rotationAngle) + if self.plotImg == None: + self.plotImg = self.ax.imshow(self.image) + else: + self.plotImg.set_data(self.image) + self.canvas.draw() + + def onMouseClicked(self, event): + if self.panMode or self.zoomMode: + return + print('click', event.button, event.xdata, event.ydata) + + if event.button == 1 and event.xdata != None and event.ydata != None: + self.leftClicks.append([event.xdata, event.ydata]) + print('self.leftClicks =', self.leftClicks) + Rect = [] + AspectRatio = None + if self.trayRadioButton.isChecked(): + AspectRatio = self.trayAspectRatio + elif self.colorcardRadioButton.isChecked(): + AspectRatio = self.colorcardAspectRatio + elif self.potRadioButton.isChecked(): + AspectRatio = self.potAspectRatio + + if len(self.leftClicks) == 2 and AspectRatio != None: + if self.potRadioButton.isChecked(): + Rect = utils.getRectCornersFrom2Points(self.image, self.leftClicks, AspectRatio, Rounded = True) + else: + Rect = utils.getRectCornersFrom2Points(self.image, self.leftClicks, AspectRatio) + self.leftClicks = [] + elif len(self.leftClicks) == 4: + Rect = [[x,y] for x,y in self.leftClicks] + Rect = utils.correctPointOrder(Rect) + self.leftClicks = [] + + if len(Rect) > 0: + if self.trayRadioButton.isChecked(): + self.trayList.append(Rect) + self.status.append('Added tray selection.') + elif self.colorcardRadioButton.isChecked(): + self.colorcardList.append(Rect) + self.status.append('Added color card selection.') + else: + self.potList.append(Rect) + self.status.append('Added pot selection.') + self.updateFigure() + elif event.button == 3: + # remove the last selection + if len(self.leftClicks) > 0: + self.leftClicks = self.leftClicks[:-1] + self.status.append('Removed a previous click') + else: + if self.trayRadioButton.isChecked() and len(self.trayList) > 0: + self.trayList = self.trayList[:-1] + self.status.append('Removed a previous tray selection') + elif self.colorcardRadioButton.isChecked() and len(self.colorcardList) > 
0: + self.colorcardList = self.colorcardList[:-1] + self.status.append('Removed a previous color card selection.') + elif self.potRadioButton.isChecked() and len(self.potList) > 0: + self.potList = self.potList[:-1] + self.status.append('Removed a previous pot selection') + self.updateFigure() + else: + print('Ignored click') + + def onMouseMoves(self, event): + if event.inaxes == self.ax: + self.mousePosition.setText('x=%d, y=%d' %(event.xdata, event.ydata)) +# self.crosshair = [event.xdata, event.ydata] + else: + self.mousePosition.setText('') +# self.crosshair = None +# self.updateFigure() + + def keyPressEvent(self, e): + if e.key() == QtCore.Qt.Key_Escape: + self.close() + + def closeEvent(self, event): + + quit_msg = "Are you sure you want to exit the program?" + reply = QtGui.QMessageBox.question(self, 'Message', + quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) + + if reply == QtGui.QMessageBox.Yes: + event.accept() + else: + event.ignore() + +if __name__ == '__main__': + app = QtGui.QApplication(sys.argv) + + main = Window() + main.setWindowTitle('Select Color Card, Trays, and Pot') + main.show() + + sys.exit(app.exec_()) \ No newline at end of file diff --git a/unwarp_rectify/trayprocessing.py b/unwarp_rectify/trayprocessing.py new file mode 100644 index 0000000..144686b --- /dev/null +++ b/unwarp_rectify/trayprocessing.py @@ -0,0 +1,205 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue May 20 11:15:55 2014 + +@author: chuong nguyen, chuong.nguyen@anu.edu.au + +This script is to provide distortion correction and +image rectification with support as generator or function. 
+""" +from __future__ import absolute_import, division, print_function + +import os, sys, glob +from multiprocessing import Pool +import numpy as np +import cv2 +import cv2yml # support input/output of OpenCV's YAML file format +from matplotlib import pylab as plt + +# Supporting functions +def rotateImage(Image, Angle): + Center=tuple(np.array(Image.shape[0:2])/2) + RotationMatrix = cv2.getRotationMatrix2D(Center, Angle, 1.0) + return cv2.warpAffine(Image, RotationMatrix, Image.shape[0:2], flags=cv2.INTER_LINEAR) + +def rectifyTrayImages(Image, RectList, TrayImgSize): + Width, Height = TrayImgSize + RectifiedCorners = np.float32([[0,0], [0,Height], [Width,Height], [Width,0]]) + RectifiedTrayImageList = [] + for Rect in RectList: + Corners = np.float32(Rect) + M = cv2.getPerspectiveTransform(Corners, RectifiedCorners) + RectifiedTrayImage = cv2.warpPerspective(Image, M,(Width, Height)) + RectifiedTrayImageList.append(RectifiedTrayImage) + return RectifiedTrayImageList + +def joinTrayImages(TrayImageList, Shape = [2,4]): + if len(TrayImageList) == 0 or len(TrayImageList) != Shape[0]*Shape[1]: + print('Invalid inputs: ', TrayImageList, Shape) + return np.asarray([]) + + TrayShape = TrayImageList[0].shape + RectifiedImage = np.resize(np.zeros_like(TrayImageList[0]), (Shape[0]*TrayShape[0], Shape[1]*TrayShape[1], TrayShape[2])) + for r in range(Shape[0]): + for c in range(Shape[1]): + rr0 = (Shape[0]-r-1)*TrayShape[0] + rr1 = (Shape[0]-r)*TrayShape[0] + cc0 = c*TrayShape[1] + cc1 = (c+1)*TrayShape[1] + RectifiedImage[rr0:rr1, cc0:cc1, :] = TrayImageList[c+r*Shape[1]] + return RectifiedImage + +def readCalibFile(CalibFile): + print(' Read', CalibFile) + parameters = cv2yml.yml2dic(CalibFile) + print(' This file created on', parameters['calibration_time']) + SquareSize = parameters['square_size'] + ImageWidth = parameters['image_width'] + ImageHeight = parameters['image_height'] + ImageSize = (ImageWidth, ImageHeight) + CameraMatrix = parameters['camera_matrix'] + 
DistCoefs = parameters['distortion_coefficients'] + RVecs = parameters['RVecs'] + TVecs = parameters['TVecs'] + return ImageSize, SquareSize, CameraMatrix, DistCoefs, RVecs, TVecs + +def readTrayConfigFile(ConfigFile): + print(' Read', ConfigFile) + dicdata = cv2yml.yml2dic(ConfigFile) + print(' This file created on', dicdata['Date']) + TrayPixWidth = dicdata['TrayImgWidth'] + TrayPixHeight = dicdata['TrayImgHeight'] + RectList2 = dicdata['TrayRectList'].tolist() + RectList = [] + for i in range(0,len(RectList2),4): + RectList.append(RectList2[i:i+4]) + return RectList, TrayPixWidth, TrayPixHeight + +# Using the generator pattern (an iterable) +class UndistortionRectificationGenerator(object): + def __init__(self, \ + FileNameList = [], \ + CameraMatrix = np.asarray([]), DistortionCoeffients = np.asarray([]), \ + ImageSize = (), RotationAngle = 0, \ + TrayRectangleList = [], TrayImgSize = [1230, 1489], TrayArrangement = [2,4]): + + self.FileNameList = FileNameList + self.CameraMatrix = CameraMatrix + self.DistortionCoeffients = DistortionCoeffients + self.ImageSize = ImageSize + self.RotationAngle = RotationAngle + self.TrayRectangleList = TrayRectangleList + self.TrayImgSize = TrayImgSize + self.TrayArrangement = TrayArrangement + self.n = len(self.FileNameList) + self.num = 0 + + if len(self.CameraMatrix) > 0 and len(self.DistortionCoeffients) > 0 and len(self.ImageSize) > 0: + self.MapX, self.MapY = cv2.initUndistortRectifyMap(self.CameraMatrix, \ + self.DistortionCoeffients, None, self.CameraMatrix, self.ImageSize, cv2.CV_32FC1) + + def __iter__(self): + return self + + def __next__(self): + return self.next() + + def next(self): + if self.num < self.n and os.path.exists(self.FileNameList[self.num]): + print(' Read', self.FileNameList[self.num]) + Image = cv2.imread(self.FileNameList[self.num]) + self.num = self.num+1 + + if len(self.CameraMatrix) > 0 and len(self.DistortionCoeffients) > 0 and len(self.ImageSize) > 0: + ImageUndistorted = cv2.remap(Image, 
self.MapX, self.MapY, cv2.INTER_CUBIC) + else: + ImageUndistorted = Image + + if abs(self.RotationAngle) == 180: + ImageUndistorted = np.rot90(np.rot90(ImageUndistorted)) + elif self.RotationAngle != 0: + ImageUndistorted = rotateImage(ImageUndistorted, self.RotationAngle) + + RectifiedTrayImageList = rectifyTrayImages(ImageUndistorted, self.TrayRectangleList, self.TrayImgSize) + RectifiedImage = joinTrayImages(RectifiedTrayImageList, self.TrayArrangement) + + return RectifiedImage + else: + raise StopIteration() + + +# Using normal function with possible parallism +def UndistortionRectificationFunction(Arg): + FileNameIn, FileNameOut, MapX, MapY, RotationAngle, \ + TrayRectangleList, MedianTraySize, TrayArrangement = Arg + + print(' Read', FileNameIn) + Image = cv2.imread(FileNameIn) + if len(MapX) > 0 and len(MapY) > 0: + ImageUndistorted = cv2.remap(Image, MapX, MapY, cv2.INTER_CUBIC) + else: + ImageUndistorted = Image + + if abs(RotationAngle) == 180: + ImageUndistorted = np.rot90(np.rot90(ImageUndistorted)) + elif RotationAngle != 0: + ImageUndistorted = rotateImage(ImageUndistorted, RotationAngle) + + RectifiedTrayImageList = rectifyTrayImages(ImageUndistorted, TrayRectangleList, MedianTraySize) + RectifiedImage = joinTrayImages(RectifiedTrayImageList, TrayArrangement) + cv2.imwrite(FileNameOut, RectifiedImage) + print(' Wrote', FileNameOut) + +# Usage demos +def demo_parallel(ImageFileListIn, ImageFileListOut, CameraMatrix, DistortionCoeffients, \ + ImageSize, RotationAngle, TrayRectangleList, TrayImgSize, TrayArrangement): + + MapX, MapY = cv2.initUndistortRectifyMap(CameraMatrix, DistortionCoeffients, \ + None, CameraMatrix, ImageSize, cv2.CV_32FC1) + ArgList = [[ImageFileIn, ImageFileOut, MapX, MapY, RotationAngle, TrayRectangleList, TrayImgSize, TrayArrangement] \ + for ImageFileIn, ImageFileOut in zip(ImageFileListIn, ImageFileListOut)] + + # actual processing + ProcessPool = Pool() + ProcessPool.map(UndistortionRectificationFunction, ArgList) + +def 
demo_generator(ImageFileListIn, ImageFileListOut, CameraMatrix, DistortionCoeffients, \ + ImageSize, RotationAngle, TrayRectangleList, TrayImgSize, TrayArrangement): + + UndistRectGen = UndistortionRectificationGenerator(ImageFileListIn, \ + CameraMatrix, DistortionCoeffients, ImageSize, \ + RotationAngle, TrayRectangleList, TrayImgSize, TrayArrangement) + for i,RectifiedImage in enumerate(UndistRectGen): + plt.imshow(RectifiedImage) + plt.show() + cv2.imwrite(ImageFileListOut[i], RectifiedImage) + print(' Wrote', ImageFileListOut[i]) + +if __name__ == "__main__": + options = sys.argv[1:] # ignored for now + + # inputs data + CalibFile = '/home/chuong/Data/Calibration-Images/calib_parameters.yml' + ConfigFile = '/home/chuong/Data/GC03L-temp/corrected/TrayConfig.yml' + InputImagePattern = '/home/chuong/Data/GC03L-temp/IMG*JPG' + OutputFolder = '/home/chuong/Data/GC03L-temp/Rectified' + RotationAngle = 180.0 # so that chamber door is on bottom side of images + TrayArrangement = [2,4] # 2-rows by 4-columns tray arrangement + + # data preparation + if not os.path.exists(OutputFolder): + os.mkdir(OutputFolder) + ImageFileListIn = sorted(glob.glob(InputImagePattern)) + ImageFileListOut = [os.path.join(OutputFolder, os.path.basename(ImageFile)) for ImageFile in ImageFileListIn] + ImageSize, SquareSize, CameraMatrix, DistortionCoeffients, RVecs, TVecs = readCalibFile(CalibFile) + TrayRectangleList, TrayImgWidth, TrayImgHeight = readTrayConfigFile(ConfigFile) + + # This can be set to a fixed value so that rectified tray image sizes + # are the same for different experiments + TrayImgSize = [TrayImgWidth, TrayImgHeight] # [1230, 1489] + + # Demos +# demo_parallel(ImageFileListIn, ImageFileListOut, CameraMatrix, DistortionCoeffients, +# ImageSize, RotationAngle, TrayRectangleList, TrayImgSize, TrayArrangement) + demo_generator(ImageFileListIn, ImageFileListOut, CameraMatrix, DistortionCoeffients, + ImageSize, RotationAngle, TrayRectangleList, TrayImgSize, TrayArrangement) \ 
No newline at end of file diff --git a/unwarp_rectify/utils.py b/unwarp_rectify/utils.py new file mode 100644 index 0000000..41d9526 --- /dev/null +++ b/unwarp_rectify/utils.py @@ -0,0 +1,456 @@ +# -*- coding: utf-8 -*- +""" +Created on Mon Jun 16 15:45:22 2014 + +@author: chuong +""" +from __future__ import absolute_import, division, print_function + +import numpy as np +import cv2yml +import cv2 +from scipy import optimize + +#RED GRN BLU +CameraTrax_24ColorCard = \ + [[ 115., 196., 91., 94., 129., 98., 223., 58., 194., 93., 162., 229., \ + 49., 77., 173., 241., 190., 0., 242., 203., 162., 120., 84., 50.], \ + [ 83., 147., 122., 108., 128., 190., 124., 92., 82., 60., 190., 158., \ + 66., 153., 57., 201., 85., 135., 243., 203., 163., 120., 84., 50.], \ + [ 68., 127., 155., 66., 176., 168., 47., 174., 96., 103., 62., 41., \ + 147., 71., 60., 25., 150., 166., 245., 204., 162., 120., 84., 52.]] +CameraTrax_24ColorCard180deg = \ + [[ 50., 84., 120., 162., 203., 242., 0., 190., 241., 173., 77., 49., \ + 229., 162., 93., 194., 58., 223., 98., 129., 94., 91., 196., 115.], \ + [ 50., 84., 120., 163., 203., 243., 135., 85., 201., 57., 153., 66., \ + 158., 190., 60., 82., 92., 124., 190., 128., 108., 122., 147., 83.], \ + [ 52., 84., 120., 162., 204., 245., 166., 150., 25., 60., 71., 147., \ + 41., 62., 103., 96., 174., 47., 168., 176., 66., 155., 127., 68.]] + +def getRectCornersFrom2Points(Image, Points, AspectRatio, Rounded = False): +# print('Points =', Points) + Length = np.sqrt((Points[0][0] - Points[1][0])**2 + \ + (Points[0][1] - Points[1][1])**2) + Height = Length/np.sqrt(1+AspectRatio**2) + Width = Height*AspectRatio + Centre = np.asarray([Points[0][0] + Points[1][0], Points[0][1] + Points[1][1]])/2.0 + Angle = np.arctan2(Height, Width) - \ + np.arctan2(Points[1][1] - Points[0][1], Points[1][0] - Points[0][0]) + InitRect = createRectangle(Centre, Width, Height, Angle) + CornerTypes = ['topleft', 'bottomleft', 'bottomright', 'topright'] + Rect = [] + for Corner, 
Type in zip(InitRect, CornerTypes): + if not Rounded: + Corner = findCorner(Image, Corner, Type) + else: + Corner = findRoundedCorner(Image, Corner, Type) + Rect.append(Corner) + return Rect + +def createRectangle(Centre, Width, Height, Angle): + tl2 = np.asarray([-Width, -Height])/2.0 + bl2 = np.asarray([-Width, Height])/2.0 + br2 = np.asarray([ Width, Height])/2.0 + tr2 = np.asarray([ Width, -Height])/2.0 + RectFit = [tl2, bl2, br2, tr2] + for i in range(len(RectFit)): + # rotate around center + xrot = RectFit[i][0]*np.cos(Angle) + RectFit[i][1]*np.sin(Angle) + yrot = -RectFit[i][0]*np.sin(Angle) + RectFit[i][1]*np.cos(Angle) + RectFit[i][0], RectFit[i][1] = (xrot+Centre[0]), (yrot+Centre[1]) + return RectFit + +def getRectangleParamters(Rect): + tl = np.asarray(Rect[0]) + bl = np.asarray(Rect[1]) + br = np.asarray(Rect[2]) + tr = np.asarray(Rect[3]) + + # paramters of fitted Rectangle + Centre = (tl + bl + br + tr)/4.0 + Width = (np.linalg.norm(tr - tl) + np.linalg.norm(br - bl))/2.0 + Height = (np.linalg.norm(bl - tl) + np.linalg.norm(br - tr))/2.0 + Angle = (np.arctan2(-(tr[1] - tl[1]), tr[0] - tl[0]) + \ + np.arctan2(-(br[1] - bl[1]), br[0] - bl[0]) + \ + np.arctan2( bl[0] - tl[0] , bl[1] - tl[1]) + \ + np.arctan2( br[0] - tr[0] , br[1] - tr[1]))/4 + return Centre, Width, Height, Angle + +def findCorner(Image, Corner, CornerType = 'topleft', WindowSize = 100, Threshold = 50): + x, y = Corner + HWindowSize = int(WindowSize/2) + window = Image[y-HWindowSize:y+HWindowSize+1, x-HWindowSize:x+HWindowSize+1,:].astype(np.float) +# cv2.imwrite('/home/chuong/Data/GC03L-temp/corrected/'+CornerType+'.jpg', window) + foundLeftEdgeX = False + foundRightEdgeX = False + foundTopEdgeY = False + foundBottomEdgeY = False + for i in range(HWindowSize+1): + diff0 = np.sum(np.abs(window[HWindowSize, HWindowSize-i,:] - window[HWindowSize, HWindowSize,:])) + diff1 = np.sum(np.abs(window[HWindowSize, HWindowSize+i,:] - window[HWindowSize, HWindowSize,:])) + diff2 = 
np.sum(np.abs(window[HWindowSize-i, HWindowSize,:] - window[HWindowSize, HWindowSize,:])) + diff3 = np.sum(np.abs(window[HWindowSize+i, HWindowSize,:] - window[HWindowSize, HWindowSize,:])) + if diff0 > Threshold and not foundLeftEdgeX: + xLeftNew = x-i + foundLeftEdgeX = True + elif diff1 > Threshold and not foundRightEdgeX: + xRightNew = x+i + foundRightEdgeX = True + if diff2 > Threshold and not foundTopEdgeY: + yTopNew = y-i + foundTopEdgeY = True + elif diff3 > Threshold and not foundBottomEdgeY: + yBottomNew = y+i + foundBottomEdgeY = True + + if CornerType.lower() == 'topleft' and foundLeftEdgeX and foundTopEdgeY: + return [xLeftNew, yTopNew] + elif CornerType.lower() == 'bottomleft' and foundLeftEdgeX and foundBottomEdgeY: + return [xLeftNew, yBottomNew] + elif CornerType.lower() == 'bottomright' and foundRightEdgeX and foundBottomEdgeY: + return [xRightNew, yBottomNew] + elif CornerType.lower() == 'topright' and foundRightEdgeX and foundTopEdgeY: + return [xRightNew, yTopNew] + else: + print('Cannot detect corner ' + CornerType) + return [x, y] + +def findRoundedCorner(Image, Corner, CornerType = 'topleft', WindowSize = 100, Threshold = 50): + #TODO: add search for rounded corner with better accuracy + return Corner + +def correctPointOrder(Rect, tolerance = 40): + # find minimum values of x and y + minX = 10e6 + minY = 10e6 + for i in range(len(Rect[0])): + if minX > Rect[i][0]: + minX = Rect[i][0] + if minY > Rect[i][1]: + minY = Rect[i][1] + #separate left and right + topLeft, bottomLeft, topRight, bottomRight = [], [], [], [] + for i in range(len(Rect[0])): + if abs(minX - Rect[0][i]) < tolerance: + if abs(minY - Rect[i][1]) < tolerance: + topLeft = [Rect[i][0], Rect[i][1]] + else: + bottomLeft = [Rect[i][0], Rect[i][1]] + else: + if abs(minY - Rect[i][1]) < tolerance: + topRight = [Rect[i][0], Rect[i][1]] + else: + bottomRight = [Rect[i][0], Rect[i][1]] + if len(topLeft)*len(bottomLeft)*len(topRight)*len(bottomRight) == 0: + print('Cannot find corRect 
corner order. Change tolerance value.') + return Rect + else: + Rect = [topLeft, bottomLeft, bottomRight, topRight] + return Rect + +def getMedianRectSize(RectList): + WidthList = [] + HeightList = [] + for Rect in RectList: + Centre, Width, Height, Angle = getRectangleParamters(Rect) + WidthList.append(Width) + HeightList.append(Height) + MedianWidth = int(sorted(WidthList)[int(len(RectList)/2)]) + MedianHeight = int(sorted(HeightList)[int(len(RectList)/2)]) + return MedianWidth, MedianHeight + +def rectifyRectImages(Image, RectList, MedianSize): + Width, Height = MedianSize + RectifiedCorners = np.float32([[0,0], [0,Height], [Width,Height], [Width,0]]) + RectifiedTrayImages = [] + for Rect in RectList: + Corners = np.float32(Rect) + M = cv2.getPerspectiveTransform(Corners, RectifiedCorners) + RectifiedTrayImage = cv2.warpPerspective(Image, M,(Width, Height)) + RectifiedTrayImages.append(RectifiedTrayImage) + return RectifiedTrayImages + +def readCalibration(CalibFile): + parameters = cv2yml.yml2dic(CalibFile) + SquareSize = parameters['square_size'] + ImageWidth = parameters['image_width'] + ImageHeight = parameters['image_height'] + ImageSize = (ImageWidth, ImageHeight) + CameraMatrix = parameters['camera_matrix'] + DistCoefs = parameters['distortion_coefficients'] + RVecs = parameters['RVecs'] + TVecs = parameters['TVecs'] + return ImageSize, SquareSize, CameraMatrix, DistCoefs, RVecs, TVecs + +def readGeometries(GeometryFile): + parameters = cv2yml.yml2dic(GeometryFile) + rotationAngle = parameters['rotationAngle'] + distortionCorrected = bool(parameters['distortionCorrected']) + colorcardList = parameters['colorcardList'].tolist() + colorcardList2 = [] + for i in range(0,len(colorcardList),4): + colorcardList2.append([colorcardList[i], colorcardList[i+1], \ + colorcardList[i+2], colorcardList[i+3]]) + trayList = parameters['trayList'].tolist() + trayList2 = [] + for i in range(0,len(trayList),4): + trayList2.append([trayList[i], trayList[i+1], \ + 
trayList[i+2], trayList[i+3]]) + potList = parameters['potList'].tolist() + potList2 = [] + for i in range(0,len(potList),4): + potList2.append([potList[i], potList[i+1], \ + potList[i+2], potList[i+3]]) + return rotationAngle, distortionCorrected, colorcardList2, trayList2, potList2 + +def createMap(Centre, Width, Height, Angle): + MapX, MapY = np.meshgrid(np.arange(Width), np.arange(Height)) + MapX = MapX - Width/2.0 + MapY = MapY - Height/2.0 + MapX2 = MapX*np.cos(Angle) + MapY*np.sin(Angle) + Centre[0] + MapY2 = -MapX*np.sin(Angle) + MapY*np.cos(Angle) + Centre[1] + return MapX2.astype(np.float32), MapY2.astype(np.float32) + +def getColorcardColors(ColorCardCaptured, GridSize): + GridCols, GridRows = GridSize + Captured_Colors = np.zeros([3,GridRows*GridCols]) + STD_Colors = np.zeros([GridRows*GridCols]) + SquareSize2 = int(ColorCardCaptured.shape[0]/GridRows) + HalfSquareSize2 = int(SquareSize2/2) + for i in range(GridRows*GridCols): + Row = i//GridCols + Col = i - Row*GridCols + rr = Row*SquareSize2 + HalfSquareSize2 + cc = Col*SquareSize2 + HalfSquareSize2 + Captured_R = ColorCardCaptured[rr-10:rr+10, cc-10:cc+10, 0].astype(np.float) + Captured_G = ColorCardCaptured[rr-10:rr+10, cc-10:cc+10, 1].astype(np.float) + Captured_B = ColorCardCaptured[rr-10:rr+10, cc-10:cc+10, 2].astype(np.float) + STD_Colors[i] = np.std(Captured_R) + np.std(Captured_G) + np.std(Captured_B) + Captured_R = np.sum(Captured_R)/Captured_R.size + Captured_G = np.sum(Captured_G)/Captured_G.size + Captured_B = np.sum(Captured_B)/Captured_B.size + Captured_Colors[0,i] = Captured_R + Captured_Colors[1,i] = Captured_G + Captured_Colors[2,i] = Captured_B + return Captured_Colors, STD_Colors + +# Using modified Gamma Correction Algorithm by +# Constantinou2013 - A comparison of color correction algorithms for endoscopic cameras +def getColorMatchingError(Arg, Colors, Captured_Colors): + ColorMatrix = Arg[:9].reshape([3,3]) + ColorConstant = Arg[9:12] + ColorGamma = Arg[12:15] + ErrorList = [] + 
for Color, Captured_Color in zip(Colors, Captured_Colors): + Color2 = np.dot(ColorMatrix, Captured_Color) + ColorConstant + Color3 = 255.0 * np.power(Color2/255.0, ColorGamma) + Error = np.linalg.norm(Color - Color3) + ErrorList.append(Error) + return ErrorList + +def correctColor(Image, ColorMatrix, ColorConstant, ColorGamma): + ImageCorrected = np.zeros_like(Image) + for i in range(Image.shape[0]): + for j in range(Image.shape[1]): + Captured_Color = Image[i,j,:].reshape([3]) + Color2 = np.dot(ColorMatrix, Captured_Color) + ColorConstant + Color3 = 255.0 * np.power(Color2/255.0, ColorGamma) + ImageCorrected[i,j,:] = np.uint8(Color3) + return ImageCorrected + +# Using modified Gamma Correction Algorithm by +# Constantinou2013 - A comparison of color correction algorithms for endoscopic cameras +def getColorMatchingErrorVectorised(Arg, Colors, Captured_Colors): + ColorMatrix = Arg[:9].reshape([3,3]) + ColorConstant = Arg[9:12].reshape([3,1]) + ColorGamma = Arg[12:15] + + TempRGB = np.dot(ColorMatrix, Captured_Colors) + ColorConstant + Corrected_Colors = np.zeros_like(TempRGB) + Corrected_Colors[0,:] = 255.0*np.power(TempRGB[0,:]/255.0, ColorGamma[0]) + Corrected_Colors[1,:] = 255.0*np.power(TempRGB[1,:]/255.0, ColorGamma[1]) + Corrected_Colors[2,:] = 255.0*np.power(TempRGB[2,:]/255.0, ColorGamma[2]) + + Diff = Colors - Corrected_Colors + ErrorList = np.sqrt(np.sum(Diff*Diff, axis= 0)).tolist() + return ErrorList + +def estimateColorParameters(TrueColors, ActualColors): + # estimate color-correction parameters + colorMatrix = np.eye(3) + colorConstant = np.zeros([3,1]) + colorGamma = np.ones([3,1]) + + Arg2 = np.zeros([9 + 3 + 3]) + Arg2[:9] = colorMatrix.reshape([9]) + Arg2[9:12] = colorConstant.reshape([3]) + Arg2[12:15] = colorGamma.reshape([3]) + + ArgRefined, _ = optimize.leastsq(getColorMatchingErrorVectorised, \ + Arg2, args=(TrueColors, ActualColors), maxfev=10000) + + colorMatrix = ArgRefined[:9].reshape([3,3]) + colorConstant = 
ArgRefined[9:12].reshape([3,1]) + colorGamma = ArgRefined[12:15] + return colorMatrix, colorConstant, colorGamma + +def correctColorVectorised(Image, ColorMatrix, ColorConstant, ColorGamma): + Width, Height = Image.shape[1::-1] + CapturedR = Image[:,:,0].reshape([1,Width*Height]) + CapturedG = Image[:,:,1].reshape([1,Width*Height]) + CapturedB = Image[:,:,2].reshape([1,Width*Height]) + CapturedRGB = np.concatenate((CapturedR, CapturedG, CapturedB), axis=0) + + TempRGB = np.dot(ColorMatrix, CapturedRGB) + ColorConstant + CorrectedRGB = np.zeros_like(TempRGB) + CorrectedRGB[0,:] = 255.0*np.power(TempRGB[0,:]/255.0, ColorGamma[0]) + CorrectedRGB[1,:] = 255.0*np.power(TempRGB[1,:]/255.0, ColorGamma[1]) + CorrectedRGB[2,:] = 255.0*np.power(TempRGB[2,:]/255.0, ColorGamma[2]) + + CorrectedR = CorrectedRGB[0,:].reshape([Height, Width]) + CorrectedG = CorrectedRGB[1,:].reshape([Height, Width]) + CorrectedB = CorrectedRGB[2,:].reshape([Height, Width]) + ImageCorrected = np.zeros_like(Image) + ImageCorrected[:,:,0] = CorrectedR + ImageCorrected[:,:,1] = CorrectedG + ImageCorrected[:,:,2] = CorrectedB + return ImageCorrected + +def rotateImage(Image, RotationAngle = 0.0): + Image_ = Image + if RotationAngle%90.0 == 0: + k = RotationAngle//90.0 + Image_ = np.rot90(Image_, k) + elif RotationAngle != 0: + center=tuple(np.array(Image_.shape[0:2])/2) + rot_mat = cv2.getRotationMatrix2D(center, RotationAngle,1.0) + Image_ = cv2.warpAffine(Image_, rot_mat, Image_.shape[0:2],flags=cv2.INTER_LINEAR) + return Image_ + +def matchTemplateLocation(Image, Template, EstimatedLocation, SearchRange = [0.5, 0.5], RangeInImage = True): + if RangeInImage: # use image size + Width = Image.shape[1] + Height = Image.shape[0] + else: # use template size + Width = Template.shape[1] + Height = Template.shape[0] + + if SearchRange == None: # search throughout the whole images + CroppedHalfWidth = Width//2 + CroppedHalfHeight = Height//2 + elif SearchRange[0] <= 1.0 and SearchRange[1] <= 1.0: # in 
fraction values + CroppedHalfWidth = (Template.shape[1]+SearchRange[0]*Width)//2 + CroppedHalfHeight = (Template.shape[0]+SearchRange[1]*Height)//2 + else: # in pixels values + CroppedHalfWidth = (Template.shape[1]+SearchRange[0])//2 + CroppedHalfHeight = (Template.shape[0]+SearchRange[1])//2 + + if CroppedHalfWidth > Image.shape[1]//2-1: + CroppedHalfWidth = Image.shape[1]//2-1 + if CroppedHalfHeight > Image.shape[0]//2-1: + CroppedHalfHeight = Image.shape[0]//2-1 + + SearchTopLeftCorner = [EstimatedLocation[0]-CroppedHalfWidth, EstimatedLocation[1]-CroppedHalfHeight] + SearchBottomRightCorner = [EstimatedLocation[0]+CroppedHalfWidth, EstimatedLocation[1]+CroppedHalfHeight] + + return matchTemplateWindow(Image, Template, SearchTopLeftCorner, SearchBottomRightCorner) + +def matchTemplateWindow(Image, Template, SearchTopLeftCorner, SearchBottomRightCorner): + CropedImage = Image[SearchTopLeftCorner[1]:SearchBottomRightCorner[1], SearchTopLeftCorner[0]:SearchBottomRightCorner[0]] + corrMap = cv2.matchTemplate(CropedImage.astype(np.uint8), Template.astype(np.uint8), cv2.TM_CCOEFF_NORMED) + _, maxVal, _, maxLoc = cv2.minMaxLoc(corrMap) + # recalculate max position in cropped image space + matchedLocImageCropped = (maxLoc[0] + Template.shape[1]//2, + maxLoc[1] + Template.shape[0]//2) + # recalculate max position in full image space + matchedLocImage = (matchedLocImageCropped[0] + SearchTopLeftCorner[0], \ + matchedLocImageCropped[1] + SearchTopLeftCorner[1]) +# if isShow: +# plt.figure() +# plt.imshow(Template) +# plt.figure() +# plt.imshow(corrMap) +# plt.hold(True) +# plt.plot([maxLoc[0]], [maxLoc[1]], 'o') +# plt.figure() +# plt.imshow(CropedImage) +# plt.hold(True) +# plt.plot([matchedLocImageCropped[0]], [matchedLocImageCropped[1]], 'o') +# plt.figure() +# plt.imshow(Image) +# plt.hold(True) +# plt.plot([matchedLocImage[0]], [matchedLocImage[1]], 'o') +# plt.show() + + return matchedLocImage, maxVal, maxLoc, corrMap + +def createImagePyramid(Image, NoLevels = 5): + 
for i in range(NoLevels): + if i == 0: + PyramidImages = [Image.astype(np.uint8)] + else: + PyramidImages.append(cv2.pyrDown(PyramidImages[i-1]).astype(np.uint8)) + return PyramidImages + +def matchTemplatePyramid(PyramidImages, PyramidTemplates, RotationAngle = None, \ + EstimatedLocation = None, SearchRange = None, NoLevels = 4, FinalLevel = 1): + for i in range(NoLevels-1, -1, -1): + if i == NoLevels-1: + if EstimatedLocation == None: + maxLocEst = [PyramidImages[i].shape[1]//2, PyramidImages[i].shape[0]//2] # image center + else: + maxLocEst = [EstimatedLocation[0]//2**i, EstimatedLocation[1]//2**i] # scale position to the pyramid level + + if SearchRange[0] > 1.0 and SearchRange[1] > 1.0: + SearchRange2 = [SearchRange[0]//2**i, SearchRange[1]//2**i] + else: + SearchRange2 = SearchRange + + matchedLocImage, maxVal, maxLoc, corrMap = matchTemplateLocation(PyramidImages[i], PyramidTemplates[i], maxLocEst, SearchRange = SearchRange2) + if RotationAngle == None: + matchedLocImage180, maxVal180, maxLoc180, corrMap180 = matchTemplateLocation(np.rot90(PyramidImages[i],2).astype(np.uint8), PyramidTemplates[i], maxLocEst, SearchRange) + if maxVal < 0.3 and maxVal180 < 0.3: + print('#### Warning: low matching score ####') +# return None, None, None + if maxVal < maxVal180: + PyramidImages = [np.rot90(Img,2) for Img in PyramidImages] + matchedLocImage, matchedLocImage180 = matchedLocImage180, matchedLocImage + maxVal, maxVal180 = maxVal180, maxVal + maxLoc, maxLoc180 = maxLoc180, maxLoc + corrMap, corrMap180 = corrMap180, corrMap + RotationAngle = 180 + else: + RotationAngle = 0 + # rescale to location in level-0 image + matchedLocImage0 = (matchedLocImage[0]*2**i, matchedLocImage[1]*2**i) + else: + maxLocEst = (matchedLocImage0[0]//2**i, matchedLocImage0[1]//2**i) + searchRange = [6,6] + + matchedLocImage, maxVal, maxLoc, corrMap = matchTemplateLocation(PyramidImages[i], PyramidTemplates[i], maxLocEst, searchRange) + # rescale to location in level-0 image + 
matchedLocImage0 = (matchedLocImage[0]*2**i, matchedLocImage[1]*2**i) + +# plt.figure() +# plt.imshow(PyramidTemplates[i]) +# +# plt.figure() +# plt.imshow(corrMap) +# plt.hold(True) +# plt.plot([maxLoc[0]], [maxLoc[1]], 'o') +# plt.title('maxVal = %f' %maxVal) +# +# plt.figure() +# plt.imshow(PyramidImages[i]) +# plt.hold(True) +# plt.plot([matchedLocImage[0]], [matchedLocImage[1]], 'o') +# plt.plot([maxLocEst[0]], [maxLocEst[1]], 'x') +# plt.title('Level = %d, RotationAngle = %f' %(i, RotationAngle)) +# plt.show() + + if i == FinalLevel: + # Skip early to save time + break + + print('maxVal, maxLocImage, RotationAngle =', maxVal, matchedLocImage0, RotationAngle) + return maxVal, matchedLocImage0, RotationAngle