scientific_comp_projects/CODE/[python]thesis_old_scripts/spatial_calibration.py
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 29 12:16:17 2018
@author: Armando FEMAT for the LMFA (Ecole Centrale de Lyon)
The purpose of this script is to perform the spatial calibration of DaVis images.
It consists in taking a double-image, splitting it in two and superimposing the two halves.
It also calculates the magnification of the picture [pixels] <-> [mm].
"""
"Always code as if the person who will maintain your code is a maniac serial killer knows where you live"
import cv2
import numpy as np
import ReadIM
from skimage import exposure
"""
Variables you can change depending on the experiment you are running (e.g. the image used for the calibration)
"""
vbuff, vatts = ReadIM.extra.get_Buffer_andAttributeList('Mire_Jour.im7')
v_array, vbuff = ReadIM.extra.buffer_as_array(vbuff)
#Import the double-image
#src_img = cv2.imread('double_image2.jpg')
#src_img_grey = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)
#cv2.imshow('Grey Image',src_img_grey) #Test to see if the image you are using is the right one
"""
Variable initialisation (don't touch unless you know what you are doing)
"""
data = {}
#data['img'] = src_img_grey.copy()
img_rescaled = exposure.rescale_intensity(v_array)
data['img'] = img_rescaled[0]
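# Note: IM7 frames are typically 16-bit, and exposure.rescale_intensity stretches the frame
# to its full dtype range so that cv2.imshow can display it. If the display still looks
# wrong, an explicit 8-bit copy can be made instead (optional sketch, not in the original flow):
#data['img'] = cv2.normalize(img_rescaled[0], None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)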
data['points'] = []
i = 0
upper_corner = []
lower_corner = []
"""
Functions defined for use inside the program
1.- pick_corner: saves the pixel location of each left-button click to a list.
2.- draw_rectangle: records a click-and-drag and draws the corresponding rectangle.
"""
def pick_corner(action, x, y, flags, userdata):
    # Referencing global variables
    global i
    # Action to be taken when the left mouse button is pressed
    if action == cv2.EVENT_LBUTTONDOWN:
        cv2.circle(data['img'], (x, y), 1, (255, 255, 255), 3, 16)
        data['points'].append([x, y])
        i += 1
def draw_rectangle(action, x, y, flags, userdata):
    # Referencing global variables
    global upper_corner, lower_corner
    # Action to be taken when the left mouse button is pressed
    if action == cv2.EVENT_LBUTTONDOWN:
        upper_corner = [(x, y)]
        # Mark the upper corner
        cv2.circle(data['img'], upper_corner[0], 1, (255, 255, 0), 2, cv2.LINE_AA)
    # Action to be taken when the left mouse button is released
    elif action == cv2.EVENT_LBUTTONUP:
        lower_corner = [(x, y)]
        cv2.circle(data['img'], lower_corner[0], 1, (255, 255, 0), 2, cv2.LINE_AA)
        cv2.rectangle(data['img'], upper_corner[0], lower_corner[0], (0, 255, 0), thickness=2, lineType=cv2.LINE_8)
        cv2.imshow("Window", data['img'])
#Make two images out of the double-image
#Obtain the left half of the double-image (zone 1): drag a rectangle, then press Esc
data['image_left'] = data['img'].copy()
cv2.namedWindow("Window",cv2.WINDOW_NORMAL) #Create the window to show image
cv2.setMouseCallback("Window", draw_rectangle)
k = 0
while k != 27:
    cv2.imshow("Window", data['image_left'])
    k = cv2.waitKey(20) & 0xFF
    #if k == 99:
    #    data['img'] = src_img_grey.copy()
cv2.destroyAllWindows()
data['image_left'] = data['image_left'][upper_corner[0][1]:lower_corner[0][1],upper_corner[0][0]:lower_corner[0][0]]
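# Note: the crop above assumes the rectangle was dragged from the upper-left to the
# lower-right corner. A drag-direction-proof variant (optional sketch) would sort the
# coordinates first:
#x0, x1 = sorted((upper_corner[0][0], lower_corner[0][0]))
#y0, y1 = sorted((upper_corner[0][1], lower_corner[0][1]))
#data['image_left'] = data['image_left'][y0:y1, x0:x1]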
#cv2.namedWindow("Left Image",cv2.WINDOW_AUTOSIZE)
#cv2.imshow("Left Image", data['image_left'])
cv2.waitKey(0)
cv2.destroyAllWindows()
#Obtain the right half of the double-image (zone 2): drag a rectangle, then press Esc
data['image_right'] = data['img'].copy()
cv2.namedWindow("Window",cv2.WINDOW_NORMAL) #Create the window to show image
cv2.setMouseCallback("Window", draw_rectangle)
k = 0
while k != 27:
    cv2.imshow("Window", data['image_right'])
    k = cv2.waitKey(20) & 0xFF
    #if k == 99:
    #    data['img'] = src_img_grey.copy()
cv2.destroyAllWindows()
data['image_right'] = data['image_right'][upper_corner[0][1]:lower_corner[0][1],upper_corner[0][0]:lower_corner[0][0]]
#cv2.namedWindow("Right Image",cv2.WINDOW_AUTOSIZE)
#cv2.imshow("Right Image", data['image_right'])
cv2.waitKey(0)
cv2.destroyAllWindows()
#Pick the four calibration points in zone 1 (left image)
cv2.namedWindow("Window",cv2.WINDOW_NORMAL) #Create the window to show image
cv2.setMouseCallback("Window", pick_corner)
k = 0
while k != 27:
    cv2.imshow("Window", data['image_left'])
    k = cv2.waitKey(20) & 0xFF
    if i > 3:
        break
    #if k == 99:
    #    data['img'] = src_img_grey.copy()
cv2.destroyAllWindows()
pts_dst = np.asarray(data['points'])
data['points'] = []
#Pick the four calibration points in zone 2 (right image)
cv2.namedWindow("Window",cv2.WINDOW_NORMAL) #Create the window to show image
cv2.setMouseCallback("Window", pick_corner)
k = 0
i = 0
while k != 27:
    cv2.imshow("Window", data['image_right'])
    k = cv2.waitKey(20) & 0xFF
    if i > 3:
        break
    #if k == 99:
    #    data['img'] = src_img_grey.copy()
cv2.destroyAllWindows()
pts_src = np.asarray(data['points'])
#Obtain the homography transformation out of the calibration zone
h, status = cv2.findHomography(pts_src,pts_dst)
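# Optional sanity check (a minimal sketch, not part of the original workflow):
# cv2.findHomography needs at least 4 point pairs and returns None when it fails;
# re-projecting pts_src through h should land close to pts_dst if the clicks were consistent.
if h is None:
    raise RuntimeError("Homography estimation failed: check the four clicked point pairs")
reproj = cv2.perspectiveTransform(pts_src.reshape(-1, 1, 2).astype(np.float64), h)
print("Mean reprojection error [px]:", np.mean(np.linalg.norm(reproj.reshape(-1, 2) - pts_dst, axis=1)))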
#Apply the homography transformation to map the right image onto the left image
data['img_temp'] = cv2.warpPerspective(data['image_right'], h, (data['image_left'].shape[1],data['image_left'].shape[0]))
#Combine the two images into a false-colour overlay to check the superposition
data['img_temp'] = cv2.cvtColor(data['img_temp'], cv2.COLOR_GRAY2BGR)
data['image_left'] = cv2.cvtColor(data['image_left'], cv2.COLOR_GRAY2BGR)
#Tint the left image red and the warped right image blue (channel order is B, G, R; 16-bit values)
data['image_left'][:, :, 2] = 35535
data['img_temp'][:, :, 0] = 65535
data['img_final'] = data['image_left'] + data['img_temp']
data['img_final'] = exposure.rescale_intensity(data['img_final'])
cv2.namedWindow("Final Source Image",cv2.WINDOW_NORMAL)
cv2.namedWindow("Normal Source Image",cv2.WINDOW_NORMAL)
cv2.namedWindow("Warped Source Image",cv2.WINDOW_NORMAL)
cv2.imshow("Normal Source Image", data['image_left'])
cv2.imshow("Warped Source Image", data['img_temp'])
cv2.imshow("Final Source Image", data['img_final'])
#cv2.imwrite("imagenial.jpg", im_out)
""" Needs yet to be adapted to our case history
#Writing files
atts = ReadIM.load_AttributeList({'attribute':'value'})
ReadIM.WriteIM7('saved_file.im7', True, vbuff, atts.next)
del(vbuff)
ReadIM.DestroyBuffer(buff)
ReadIM.DestroyAttributeListSafe(atts)
"""
#End code and close everything
cv2.waitKey(0)
cv2.destroyAllWindows()