from __future__ import division, unicode_literals, print_function
import os
import subprocess
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('figure', figsize=(10,6))
mpl.rc('image', cmap='gray')
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import pims
import trackpy as tp
from trackpy import predict
from pims import Frame
from scipy.optimize import curve_fit
import scipy as sp
### ###
### Cropping functions sometimes used ###
### ###
def cropngrey (img,*args):
    '''Crop an image to a window and greyscale it by keeping only channel 0.

    Inputs:
        img: image array of shape (H, W, channels)
        *args: optionally a single coords list [xmin, xmax, ymin, ymax]; when
            omitted the historical full-sensor window [0, 2048, 0, 2048] is
            used, so existing pims process_func usage is unchanged.
    Returns:
        2D greyscale crop img[ymin:ymax, xmin:xmax, 0]
    '''
    # Default window reproduces the original hard-coded full-frame crop
    xmin, xmax, ymin, ymax = 0, 2048, 0, 2048
    # Generalisation: honour an explicit coords argument when one is supplied
    # (the docstring always promised coords-based cropping but *args was ignored)
    if args and len(args[0]) == 4:
        xmin, xmax, ymin, ymax = args[0]
    return img[ymin:ymax,xmin:xmax,0]
def crop (img,coords):
    '''Return the sub-image of img bounded by coords = [xmin, xmax, ymin, ymax].'''
    x_lo, x_hi, y_lo, y_hi = coords
    return img[y_lo:y_hi, x_lo:x_hi]
### ###
###LOAD CURVE DETERMINING / PARTICLE TRACKING FUNCTIONS ###
### ###
def singleFrameAnalyse(frame, oddNoDiameter, minmass = 2000, invert = True):
    '''Run trackpy particle location on one frame and show diagnostic plots.

    Intended for tuning the particle size (oddNoDiameter) and particle
    'mass' (minmass) parameters on a single frame before running
    batchFrameAnalysis over every frame. Inspect the output plots as
    described in the trackpy walkthrough.
    '''
    # Locate the particles in this single frame
    located = tp.locate(frame, oddNoDiameter, minmass = minmass, invert = invert)
    # Sub-pixel bias diagnostic; ideally this looks nice and flat
    tp.subpx_bias(located)
    # Histogram of the detected particle masses
    fig, ax = plt.subplots()
    ax.hist(located['mass'], bins=20)
    ax.set(xlabel='mass', ylabel='count')
    return None
def batchFrameAnalyse(frames, oddNoDiameter, minmass = 2000, invert = True, bins = 20, showSubPixBias = True, showMassHist = True):
    '''Batch analysis of all frames from mechanical test using trackpy to locate particles.

    See trackpy documentation for description of functions used inside this function.
    Inputs:
        frames: imported frames of the photographs of sample under white light illumination at each strain step
        oddNoDiameter: estimate of 'pixel size' of particles to locate MUST BE AN ODD NUMBER
        minmass: estimate of the particle 'mass' used for particle identification
        bins: integer number of histogram bins used for plotting
        showSubPixBias: bool for choosing to plot the sub pix bias or not
        showMassHist: bool for choosing to plot the mass histogram or not
    Returns:
        f: array containing located particles and associated data
    '''
    # BUG FIX: the particle diameter was hard-coded to 15 and silently
    # ignored the oddNoDiameter argument; pass the caller's value through.
    f = tp.batch(frames, oddNoDiameter, minmass=minmass, invert=invert)
    # Plot sub-px bias if required. Ideally this shouldn't have a dip in the middle.
    if showSubPixBias == True:
        tp.subpx_bias(f)
    # Plot a particle mass histogram if required
    if showMassHist == True:
        fig, ax = plt.subplots()
        ax.hist(f['mass'], bins=bins)
        ax.set(xlabel='mass', ylabel='count')
    return f
def predictiveLink (f, frames, searchWindow = 42, memory = 2, framesToPlot = [0,1,2]):
    '''Link located particles into trajectories with trackpy's predictive linking.

    Predictive linking biases the search direction using the trajectory found
    so far, which allows larger search windows without the linking timing out.
    Trajectories that do not persist from the first to the last frame are
    filtered out, since they cannot be used to compute strain across the whole
    experiment. Several frames are also plotted with the trajectories overlaid
    for a quick visual check.
    Inputs:
        f: locations and details of the particles tracked
        frames: photographs of the sample, used for plotting trajectories over
        searchWindow: size of the window searched for a particle's movement between frames
        memory: number of frames a particle may vanish for and still be reconnected
        framesToPlot: list of integer frame numbers to plot with trajectories overlaid
    Returns:
        t1: trajectories identified
    '''
    # Predictive linker, then link the located particles into trajectories
    predictor = tp.predict.NearestVelocityPredict()
    linked = predictor.link_df(f, searchWindow, memory=memory)
    # Keep only trajectories that survive from first frame to last
    kept = tp.filter_stubs(linked, len(frames))
    # Report how many trajectories survive the filtering
    print('Before:', linked['particle'].nunique())
    print('After:', kept['particle'].nunique())
    # Labelled trajectory plot on a blank background, then photo overlays
    plt.figure()
    fig, axes = plt.subplots(len(framesToPlot) + 1, sharex=True)
    tp.plot_traj(kept, label=True, ax=axes[0])
    for n, frame_no in enumerate(framesToPlot):
        tp.plot_traj(kept, superimpose = frames[frame_no], ax=axes[n + 1])
    fig.subplots_adjust(hspace=0.5)
    plt.show()
    return kept
def plotParticles(frames, detectedParticles, framesToShow = [0,1,2,3,4]):
    '''Overlay the located particles on the sample photographs for the chosen frames.'''
    plt.figure()
    for frame_no in framesToShow:
        tp.annotate(detectedParticles[detectedParticles['frame'] == frame_no], frames[frame_no])
    return None
def saveTrajImages(frames, t1, loc):
    '''Save every frame with its identified trajectories overlaid.

    The saved images must be assessed manually to confirm the tracking
    followed real particles and did not skip between parts of a particle,
    as often happens with things like dust.
    '''
    for idx, frame in enumerate(frames):
        fig = plt.figure(figsize=(10,3))
        axis = fig.add_subplot(111)
        tp.plot_traj(t1, superimpose = frame, ax=axis)
        fig.savefig(loc + 'trackpy output/frame' + str(idx) + '.png', dpi = 300)
        plt.close()
    return None
def procMESSEData(loc, fname, lastDataPt, zeroOffset = 0.0, skiprows = 10, direcAngMeas = False, lastAngle = 360, m = 0.00480, c = 0.821):
    '''Load a MESSE datafile and extract the load-cell force at each strain step.

    Several things can go wrong here and some dark magic from the user may be
    needed. If it misbehaves too much, open the datafile in Excel and extract
    the data manually, or edit a copy of the raw data file to remove unhelpful
    rows.
    Inputs:
        loc: folder location of the datafile
        fname: string filename to open
        lastDataPt: integer identifying the last point logged before the strain
            is incremented (when the sample has stress-relaxed the most); used
            only when director rotation measurements were NOT performed
        zeroOffset: shift applied to the load-cell readings if its zero point
            has drifted (equivalent to zeroing a balance)
        skiprows: header rows to skip; increase if the file comment contains newlines
        direcAngMeas: True if director rotation was performed at each strain
            step - rows where DirectorAngle equals lastAngle are then selected
        lastAngle: the final director angle MESSE records (usually 360 for
            10 degree XPOL steps; 180 for 20 degree steps)
        m: gradient of the load-cell calibration curve (force -> equivalent hung mass)
        c: intercept of the load-cell calibration curve
    Returns:
        force: force values for each strain step (may be garbage for non-standard tests)
        MESSE_subset: the rows of the MESSE file that were selected
        raw: the full MESSE datafile as loaded
    '''
    raw = pd.read_csv(loc + fname, skiprows = skiprows, sep = '\t')
    if direcAngMeas == True:
        # One row per strain step: the reading taken at the final XPOL angle
        MESSE_subset = raw[raw.DirectorAngle == lastAngle]
    else:
        step_suffix = str(lastDataPt)
        # The very first data point gives the unstrained reference row
        # (could be used to zero against instead of zeroOffset...)
        initial = raw[raw['Step'].str.contains('000000000000')]
        # Remaining rows: last data point before each strain increment
        relaxed = raw[raw['Step'].str.endswith(step_suffix)]
        MESSE_subset = pd.concat([initial, relaxed])
    # Calibration: ***force = 0 corresponds to a voltage reading of -171.04mV***
    force = (m*(MESSE_subset['Force_(mV)']-zeroOffset) + c)*9.812
    return force, MESSE_subset, raw
def getStrain(t1, x_tracers):
    '''Calculate the localised engineering strain between two tracked tracer particles.

    Inputs:
        t1: trajectory DataFrame with 'particle', 'frame', 'x' and 'y' columns
        x_tracers: the two particle ids to measure between
    Returns:
        strain: strain per frame, zero at the first frame
        x1, x2: the per-particle trajectory subsets used
    '''
    # (Removed a dead `unstack` computation whose result was never used.)
    # Extract the trajectory of each tracer particle
    x1 = t1[t1.particle == x_tracers[0]]
    x2 = t1[t1.particle == x_tracers[1]]
    # Per-frame x separation between the two tracers
    d_length = x2.set_index(['frame']) - x1.set_index(['frame'])
    dx = d_length['x']
    # Engineering strain relative to the separation in frame 0
    strain = dx/dx[0] - 1
    return strain, x1, x2
def getDx(t1, x_tracers):
    '''Return the per-frame pixel (x) separation between two tracer particles.

    Same extraction as getStrain but returns the raw pixel separation rather
    than the strain - useful e.g. for Poisson ratio calculations.
    '''
    # (Removed a dead `unstack` computation whose result was never used.)
    x1 = t1[t1.particle == x_tracers[0]]
    x2 = t1[t1.particle == x_tracers[1]]
    d_length = x2.set_index(['frame']) - x1.set_index(['frame'])
    return d_length['x']
def getStressStrainCurve(initial_width_px, sample_thickness, force, strain, camera_calibration = 11.76e-3/1526):
    '''Calculate and plot the tensile load curve from particle tracking and MESSE data.

    Inputs:
        initial_width_px: initial pixel width of the sample, measured manually in ImageJ
        sample_thickness: initial sample thickness from the micrometer, measured
            before testing in the middle of the sample
        force: force data extracted from the MESSE datafile
        strain: strain data from particle tracking or manually loaded (pandas Series)
        camera_calibration: metres per pixel; default is the first navitar
            camera at its widest zoom level (1526 px = 11.760 mm)
    Returns:
        final_stress: list of true stress
        final_strain: list of strain values
    '''
    # BUG FIX: the camera_calibration parameter used to be unconditionally
    # overwritten with the default value here, so callers could never supply
    # their own calibration; the argument is now used as given.
    initial_width = initial_width_px*camera_calibration
    # Engineering stress
    stress = force/(sample_thickness*initial_width)
    # True stress, assuming constant volume: multiply engineering stress by
    # the extension ratio (strain + 1)
    final_stress = stress[0:len(strain)].values*(strain.values+1)
    final_strain = strain.values
    # Plot the tensile load curve
    plt.plot(final_strain,final_stress)
    return final_stress, final_strain
def exportMechData(final_stress, final_strain, f_out_name, loc):
    '''Export the tensile load curve as a tab-separated (strain, stress) text file.

    Admittedly a fairly inefficient function, but it is simple and does not
    need to be efficient.
    '''
    n_rows = len(final_strain)
    # Stage the data as (strain, stress) pairs before writing
    table = np.zeros((n_rows, 2))
    for row in range(n_rows):
        table[row][0] = final_strain[row]
        table[row][1] = final_stress[row]
    # Write one tab-separated pair per line
    fout = open(loc + f_out_name, "w")
    for row in range(n_rows):
        fout.write("%.4f\t%.4f\n" % (table[row][0], table[row][1]))
    fout.close()
    return None
### ###
### FUNCTIONS FOR MEASURING FILM WIDTH ###
### ###
def MeasureWidth (frame,min_diff = 40, correction = 0, print_width = False):
    '''Measure the pixel width of the film in a frame.

    The brightness of the first row is taken as a reference; rows are scanned
    downward until one differs from the reference by more than min_diff,
    marking the top of the film. The same scan runs upward from the last row
    to find the bottom. The width is the index difference plus an optional
    manual correction.
    Args:
        frame: image as a numpy array-type or PIMS-type object
        min_diff: minimum row-intensity difference defining a film boundary
        correction: offset applied to the result, e.g. from checking one frame in ImageJ
        print_width: if True, print the measured width
    Returns:
        width: determined pixel width of the film
    '''
    # Multi-pixel windows compare row averages; single-pixel data (e.g. direct
    # thickness measurements) compares the raw values. Same scan either way.
    if np.shape(frame)[0] > 1:
        def row_value(idx):
            return np.average(frame[idx])
    else:
        def row_value(idx):
            return frame[idx]
    # Scan down from the top for the first row that differs enough
    reference = row_value(0)
    top_index = 0
    delta = 0
    while delta < min_diff:
        delta = abs(reference - row_value(top_index))
        top_index += 1
    # Scan up from the bottom for the last row that differs enough
    reference = row_value(-1)
    bottom_index = len(frame) - 1
    delta = 0
    while delta < min_diff:
        bottom_index -= 1
        delta = abs(reference - row_value(bottom_index))
    width = bottom_index - top_index + correction
    # Print the width of each frame if requested
    if print_width == True:
        print(width)
    return width
def MeasureFilmBrightness (frame, initial_diff = 40, divisor = 2.8):
    '''Generate a per-frame min_diff value for films of inconsistent brightness.

    A poorly named function but oh well... For direct thickness measurements
    the film brightness varies enough that a single min_diff cannot locate the
    film edges in every frame. Here initial_diff is used to roughly locate the
    film (same scanning scheme as MeasureWidth, but averaging only the first
    20 columns), the average brightness over that region is measured, and the
    contrast (max - ref) is divided by divisor (a value chosen with user
    intelligence) to give a min_diff suited to this particular frame.
    '''
    if np.shape(frame)[0] > 1:
        ref = np.average(frame[0][0:20])
        diff = 0
        top_index = 0
        bottom_index = (len(frame))-1
        # Scan down for the film top
        while diff < initial_diff:
            diff = abs(ref - np.average(frame[top_index][0:20]))
            top_index = top_index + 1
        diff = 0
        ref = np.average(frame[-1][0:20])
        # Scan up for the film bottom
        while diff < initial_diff:
            bottom_index = bottom_index - 1
            diff = abs(ref - np.average(frame[bottom_index][0:20]))
        # Film interior brightness, trimmed 40 rows clear of each edge
        max = np.average(frame[top_index+40:bottom_index-40].transpose()[0:500])
    else:
        ref = frame[0]
        diff = 0
        top_index = 0
        bottom_index = (len(frame))-1
        # BUG FIX: this branch previously compared against an undefined name
        # `min_diff` (a NameError whenever it executed); the rough threshold
        # here is initial_diff, matching the branch above.
        while diff < initial_diff:
            diff = abs(ref - frame[top_index])
            top_index = top_index + 1
        diff = 0
        ref = frame[-1]
        while diff < initial_diff:
            bottom_index = bottom_index - 1
            diff = abs(ref - frame[bottom_index])
        max = np.average(frame[top_index:bottom_index])
    return (max - ref)/divisor
def MeasureFilmPos (frame,min_diff = 40, correction = 0, coords = [0,2048,0,2048]):
    '''Locate the film within an image.

    Works like MeasureWidth on the cropped region and additionally reports
    the film's vertical mid-point and edge indices.
    Returns [width, midpt, bottom_index, top_index].
    '''
    window = crop(frame, coords)
    # Scan down from the top for the first row differing from the reference
    reference = np.average(window[0])
    top_index = 0
    delta = 0
    while delta < min_diff:
        delta = abs(reference - np.average(window[top_index]))
        top_index += 1
    # Scan up from the bottom for the last differing row
    reference = np.average(window[-1])
    bottom_index = len(window) - 1
    delta = 0
    while delta < min_diff:
        bottom_index -= 1
        delta = abs(reference - np.average(window[bottom_index]))
    width = bottom_index - top_index + correction
    midpt = top_index + (bottom_index - top_index)/2
    return [width, midpt, bottom_index, top_index]
def multiFrameWidthMeasure(frames, min_diff = 27, correction = 0, print_width = False):
    '''Measure the sample width at every strain step.

    See MeasureWidth for the per-frame measurement details.
    Returns the widths in metres and the widths as a fraction of the first.
    '''
    # camera calibration: 1526 px = 11.760 mm
    camera_calibration = 11.76e-3/1526
    px_widths = np.zeros(len(frames))
    for idx, frame in enumerate(frames):
        px_widths[idx] = MeasureWidth(frame, min_diff = min_diff, correction = correction, print_width = print_width)
    width = px_widths * camera_calibration
    fract_width = width / width[0]
    return width, fract_width
def outputWidthData(loc, f_name_in, f_name_out, width, fract_width):
    '''Combine the tensile load curve with the width measurements and save to file.

    Imports the previously exported load-curve data (should probably have an
    if-exists guard in case it has not been measured yet), appends the
    absolute width [mm], fractional width and constant-volume fractional
    thickness per strain step, and writes the six-column result.
    '''
    # Import tensile load curve data
    load_curve_data = np.genfromtxt(loc + f_name_in, delimiter="\t")
    n_rows = len(load_curve_data)
    n_cols = len(load_curve_data[0])
    all_data = np.zeros((n_rows, 6))
    for row in range(n_rows):
        # Copy across the load-curve columns
        all_data[row][:n_cols] = load_curve_data[row]
        # Actual sample width in mm
        all_data[row][3] = width[row] * 1000
        # Fractional width
        all_data[row][4] = fract_width[row]
        # Fractional thickness, assuming constant volume
        all_data[row][5] = 1/((1 + load_curve_data[row][0]) * fract_width[row])
    # Write header then the data rows
    fout = open(loc + f_name_out, "w")
    fout.write("Strain\tStress[MPa]\tDirectorAngle[deg]\tWidth[mm]\tFractionalWidth\tFractionalThickness\n")
    for row in range(n_rows):
        fout.write("%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\n" % tuple(all_data[row]))
    fout.close()
    return None
### ###
### FUNCTIONS FOR MEASURING XPOL ###
### TRANSMISSION / FITTING FOR ###
### DIRECTOR ANGLE ###
### ###
####How images are stored in frames###
#frames[0]
#frames[36][0][2017]
#first index is the frame number
#second index is the y variable
#third index is the x variable
#indices start from top left of image
#frames = pims.ImageSequence(photo_loc + 'A000' + str(cycle_number) + step_number + '*.jpg', process_func = cropngrey)
def average_line (frame, line, x_start, window_dim):
    '''Mean pixel value of row `line` over columns [x_start, x_start + window_dim).'''
    segment = frame[line][x_start:x_start + window_dim]
    return np.average(segment)
def MeasureEachPhoto (frame, central_coord, window_dims):
    '''Mean pixel value of a window of the image centred on central_coord.

    Used to measure the XPOL-transmitted intensity of a window of the sample
    for a single photograph: one strain step at one polariser/analyser angle
    (e.g. 3rd strain step with polariser/analyser at 30/120 deg).
    Called from StrainIncrementData.
    '''
    # Centre the window: step back half the (odd) window size, plus one pixel
    x_start = int(central_coord[0] - (window_dims[0]-1)/2)
    y_start = int(central_coord[1] - (window_dims[1]-1)/2)
    # Average each row of the window, then average the row means
    row_means = np.zeros(window_dims[1])
    for offset in range(window_dims[1]):
        row_means[offset] = average_line(frame, y_start + offset, x_start, window_dims[0])
    return np.average(row_means)
def StrainIncrementData (frames, central_coord, window_dims):
    '''Measure the mean transmitted XPOL intensity for every rotation frame of one strain step.

    frames should hold the XPOL rotation photographs for the step; the final
    frame is excluded from the measurement.
    '''
    n_measured = len(frames) - 1
    step_averages = np.zeros(n_measured)
    for idx in range(n_measured):
        step_averages[idx] = MeasureEachPhoto(frames[idx], central_coord, window_dims)
    return step_averages
def LotsOfData (photo_loc, file_list, central_coord, window_dims, no_steps, file_prefix = 'A000', moving_centre = False):
    '''Measure all transmitted XPOL intensities for a complete deformation at a location.

    Another unhelpful function name. Uses StrainIncrementData (and
    MeasureEachPhoto within it) to run through the XPOL photographs of every
    strain step, measuring the transmitted intensity of the chosen window at
    each rotation step of the polariser and analyser.
    Inputs:
        photo_loc: string folder location of the photographs
        file_list: string fragments used by pims.ImageSequence to find each strain step's photographs
        central_coord: window centre; a single [x, y] when moving_centre is
            False, or one [x, y] per strain step when True
        window_dims: [width, height] of the measurement window (forced odd here)
        no_steps: number of strain steps involved in this test
        file_prefix: filename prefix of the photograph series
        moving_centre: whether the measurement window centre moves with the strain step
    Returns:
        step_averages: numpy array containing all measured transmitted XPOL data
        frames: the last set of frames loaded (kept for convenience)
    '''
    # Load one step to size the output array
    frames = pims.ImageSequence(photo_loc + file_prefix + file_list[0] + '*.jpg', process_func = cropngrey)
    step_averages = np.zeros((no_steps,len(frames)-1))
    # Force the window dimensions odd so the window can be centred exactly
    window_dims[0] = 2*int(window_dims[0]/2)+1
    window_dims[1] = 2*int(window_dims[1]/2)+1
    print('Window dimensions used:[{},{}]'.format(window_dims[0],window_dims[1]))
    if moving_centre == False:
        print('Coordinate: {},{}. Number of steps to run is {}'.format(central_coord[0],central_coord[1],no_steps))
        # Cycle over each strain step
        for k in range(0,no_steps):
            # Load this step's frames; the printed count doubles as a progress check
            frames = pims.ImageSequence(photo_loc + file_prefix + file_list[k] + '*.jpg', process_func = cropngrey)
            print(len(frames))
            # BUG FIX: the coordinate pair was previously passed as two
            # separate positional arguments, a TypeError for the 3-argument
            # StrainIncrementData; pass a single [x, y] list as in the
            # moving_centre branch below.
            step_averages[k] = StrainIncrementData(frames, [int(central_coord[0]), int(central_coord[1])], window_dims)
            print('Step {} done!'.format(k))
    else:
        print('Number of steps to run is {}'.format(no_steps))
        for k in range(0,no_steps):
            # Load this step's frames; the printed count doubles as a progress check
            frames = pims.ImageSequence(photo_loc + file_prefix + file_list[k] + '*.jpg', process_func = cropngrey)
            print(len(frames))
            # Window centre follows the per-step coordinate list
            step_averages[k] = StrainIncrementData(frames, [int(central_coord[k][0]),int(central_coord[k][1])], window_dims)
            print('Step {} at coord ({},{}) done!'.format(k,int(central_coord[k][0]),int(central_coord[k][1])))
    return step_averages, frames
def SaveData (filename, loc, data, step_rotation = 10):
    '''Write the transmitted XPOL data for every strain step and rotation to file.

    Each output line holds the XPOL rotation angle followed by the intensity
    measured at that angle for each strain step, tab separated.
    Inputs:
        filename: the filename to write to
        loc: the folder to write into
        data: transmitted XPOL data generated by LotsOfData (steps x angles)
        step_rotation: angular increment between XPOL photographs [deg]
    Returns None
    '''
    n_steps = len(data)
    n_angles = len(data[0])
    fout = open(loc + filename, "w")
    for angle_idx in range(n_angles):
        # Rotation angle first...
        fout.write("%i\t" % (angle_idx * step_rotation))
        # ...then one intensity per strain step
        for step in range(n_steps - 1):
            fout.write("%.2f\t" % (data[step][angle_idx]))
        # Last column ends the line with a newline instead of a tab
        fout.write("%.2f\n" % (data[n_steps - 1][angle_idx]))
    fout.close()
    return None
def CosSqFn(x, a, b, c, d):
    '''General cosine-squared fitting function: a*cos^2(b*(x + c) deg) + d.'''
    phase_rad = b * (x + c) * np.pi / 180
    return a * np.cos(phase_rad) ** 2 + d
def CosSqFit (no_steps, data, bounds=((0, 1.8, -70, 0), (np.inf, 2.2, 70, np.inf))):
    '''Fit CosSqFn to the transmitted XPOL intensity data of every strain step.

    Not sure if this should really pass in the fitting function as a variable
    as well (CosSqFn). Works, so never mind for now.
    Inputs:
        no_steps: number of strain steps for the test
        data: row 0 holds the XPOL angles, rows 1..no_steps the intensities;
            the final column of each row is excluded from the fit
        bounds: fitting boundaries for the (a, b, c, d) parameters
    Returns:
        popt: optimised fitting parameters for each strain step
        perr: errors on the values in popt
    '''
    # One (a, b, c, d) parameter set and error set per strain step
    popt = np.zeros((no_steps,4))
    perr = np.zeros((no_steps,4))
    for step in range(no_steps):
        popt[step], pcov = curve_fit(CosSqFn, data[0][:-1], data[step + 1][:-1], bounds = bounds)
        # One-sigma parameter errors from the covariance diagonal
        perr[step] = np.sqrt(np.diag(pcov))
    return popt, perr
def SinSqFn(x, a, b, c, d):
    '''Sine-squared fitting function: a*sin^2(b*(x + c) deg) + d.'''
    phase_rad = b * (x + c) * np.pi / 180
    return a * np.sin(phase_rad) ** 2 + d
def SinSqFit (no_steps, data, bounds=((0, 1.8, -70, 0), (np.inf, 2.2, 70, np.inf))):
    '''Fit SinSqFn to the transmitted XPOL intensity data of every strain step.

    Generally used over the cosine fitting, since the fitted parameter c
    gives the possible director orientation. Not sure if the fitting function
    should really be passed in as a variable as well (SinSqFn). Works, so
    never mind for now.
    Inputs:
        no_steps: number of strain steps for the test
        data: row 0 holds the XPOL angles, rows 1..no_steps the intensities;
            the final column of each row is excluded from the fit
        bounds: fitting boundaries for the (a, b, c, d) parameters
    Returns:
        popt: optimised fitting parameters for each strain step
        perr: errors on the values in popt
    '''
    popt = np.zeros((no_steps,4))
    perr = np.zeros((no_steps,4))
    for step in range(no_steps):
        popt[step], pcov = curve_fit(SinSqFn, data[0][:-1], data[step + 1][:-1], bounds = bounds)
        # One-sigma parameter errors from the covariance diagonal
        perr[step] = np.sqrt(np.diag(pcov))
    return popt, perr
def SinSqFn2(x, a, c, d):
    '''Sine-squared fitting function with the frequency fixed at 2: a*sin^2(2*(x + c) deg) + d.'''
    phase_rad = 2 * (x + c) * np.pi / 180
    return a * np.sin(phase_rad) ** 2 + d
def SinSqFit2 (no_steps, data, bounds=((0, -70, 0), (np.inf, 70, np.inf))):
    '''Fit SinSqFn2 (fixed-frequency sine squared) to every strain step's XPOL data.

    Generally used over the cosine fitting, since the fitted parameter c
    gives the possible director orientation.
    Inputs:
        no_steps: number of strain steps for the test
        data: row 0 holds the XPOL angles, rows 1..no_steps the intensities;
            the final column of each row is excluded from the fit
        bounds: fitting boundaries for the (a, c, d) parameters
    Returns:
        popt: optimised fitting parameters for each strain step
        perr: errors on the values in popt
    '''
    popt = np.zeros((no_steps,3))
    perr = np.zeros((no_steps,3))
    for step in range(no_steps):
        popt[step], pcov = curve_fit(SinSqFn2, data[0][:-1], data[step + 1][:-1], bounds = bounds)
        # One-sigma parameter errors from the covariance diagonal
        perr[step] = np.sqrt(np.diag(pcov))
    return popt, perr
### ###
### A LOAD OF MESSY FUNCTIONS FOR MEASURING DIRECTOR ANGLE ACROSS A SAMPLE. DON'T GO HERE, IT'S NOT WORTH IT ###
### ###
def commandToGnuplot (command):
    '''Pipe a command string into a persistent gnuplot subprocess (Windows path).'''
    gnuplot = subprocess.Popen(
        ['C:/Program Files/gnuplot/bin/gnuplot.exe', '-p'],
        shell=True,
        stdin=subprocess.PIPE,
    )
    # gnuplot expects bytes on stdin
    gnuplot.communicate(command.encode())
    return None
def quiverPlot (Data, n, savepath = '', fname = '', savefiles = True):
    '''Quiver-plot the fitted director angles for each of the n frames.

    Arrow directions are derived from each frame's angle column; the y axis
    is inverted to match image coordinates. Figures are optionally saved as
    savepath + fname + frame number.
    '''
    for frame_no in range(n):
        Data = Data.sort_index()
        # Arrow components from the director angles (degrees -> radians)
        angle_rad = Data.loc[:,(str(frame_no))]*np.pi/180
        U = np.cos(angle_rad)
        V = np.sin(angle_rad)
        # Grid coordinates of each arrow
        X = Data.loc[:,'X']
        Y = Data.loc[:,'Y']
        plt.figure()
        plt.title('Arrows scale with plot width, not view')
        plt.gca().invert_yaxis()
        Q = plt.quiver(X, Y, U, V, units='width', pivot = 'mid')
        if savefiles == True:
            plt.savefig(savepath+fname+str(frame_no), transparent = False)
    return None
def genKeys (Data, n, rows, columns, emptykeys, mindiff = 55):
    #Generate keys flagging grid points where the director-angle fitting has
    #gone wrong - e.g. the fit choosing the wrong minimum of the sine
    #function, or a random error in the fitting.
    #Keys can run from 0 to rows*columns; any key flagged has something up with it.
    keys = np.copy(emptykeys)
    #Sort the grid points row by row (top-left to bottom-right) so that
    #consecutive entries are horizontally adjacent points
    Data = Data.sort_values(['Y','X'], ascending=[True,True])
    #loop over number of frames
    for m in range(0,n):
        #loop over the number of rows - working from top left to bottom right,
        #row by row
        for i in range(0,rows):
            #k indexes the next empty cell of this row's keys
            k = 0
            #NOTE(review): the stride of 51 assumes 51 grid points per row - TODO confirm
            for j in range(51*i,51*i+50):
                diff = 0
                #guard against indexing past the end of the grid data
                if j < rows*columns - 1:
                    #angle difference between horizontally adjacent grid points
                    #(single-row slices converted to floats)
                    diff = abs(float(Data.loc[:,str(m)][j:j+1])-float(Data.loc[:,str(m)][j+1:j+2]))
                    if diff > mindiff:
                        #flag the right-hand point of the discontinuous pair
                        keys[m][i][k] = j+1
                        k = k+1
    #restore the original row order before returning
    Data = Data.sort_index()
    return keys
def correctBulkDegeneracy (Data, n, ikeys):
    #Bulk-correct the +/-90 degree branch degeneracy of the fitted director angle.
    #ikeys flags, per frame and grid row, where the fit jumped branch: points
    #from the row start up to the first flag are shifted by -90 deg and points
    #from the second flag to the row end by +90 deg.
    ckeys = np.copy(ikeys)
    #NOTE(review): this sort result is discarded (not assigned) - presumably
    #a leftover; the sorted views below are recomputed anyway
    Data.sort_values(['Y','X'], ascending=[True,True])
    CorrectedData = Data.copy(deep = True)
    CorrectedData = CorrectedData.sort_values(['Y','X'], ascending=[True,True])
    for m in range (0,n):
        #columns 2 and 3 of each frame's keys hold the row start/end indices
        #in the flattened grid
        #NOTE(review): assumes 51 points per grid row (starts 0..357, ends 51..408) - TODO confirm
        ckeys[m].transpose()[2] = np.arange(0,358,51)
        ckeys[m].transpose()[3] = np.arange(51,409,51)
        for i in range (0,len(ckeys[m])):
            #non-zero first flag: rotate everything before it by -90 deg
            if ckeys [m][i][0] != 0:
                CorrectedData.loc[:,(str(m))][int(ckeys[m][i][2]):int(ckeys[m][i][0])] = \
                Data.sort_values(['Y','X'], ascending=[True,True]).loc[:,(str(m))][int(ckeys[m][i][2]):int(ckeys[m][i][0])]-90
            #non-zero second flag: rotate everything after it by +90 deg
            if ckeys [m][i][1] != 0:
                CorrectedData.loc[:,(str(m))][int(ckeys[m][i][1]):int(ckeys[m][i][3])] = \
                Data.sort_values(['Y','X'], ascending=[True,True]).loc[:,(str(m))][int(ckeys[m][i][1]):int(ckeys[m][i][3])]+90
    #restore the original row order of both frames before returning
    CorrectedData = CorrectedData.sort_index()
    Data = Data.sort_index()
    return CorrectedData,ckeys
def singleKeyCorrect(Data, keyID, change):
    #Manually rotate one value, or a consecutive run of values, by the same amount.
    #keyID = [frame number, start key, stop key]; the shift `change` is applied
    #to the row-sorted data between start and stop and a corrected copy returned.
    m = keyID[0]
    start = keyID[1]
    stop = keyID[2]
    #work on a deep copy so the caller's data is not modified
    CorrectedData = Data.copy(deep = True)
    CorrectedData = CorrectedData.sort_values(['Y','X'], ascending=[True,True])
    #apply the shift over the [start:stop) slice of the row-sorted data
    CorrectedData.loc[:,(str(m))][start:stop] = \
    Data.sort_values(['Y','X'], ascending=[True,True]).loc[:,(str(m))][start:stop]+change
    #restore the original row order before returning
    CorrectedData = CorrectedData.sort_index()
    Data = Data.sort_index()
    return CorrectedData
def averageToCorrectSingleKey(Data, keyID, horizontal = True):
    #Estimate a replacement value for a single bad grid point by averaging its
    #neighbours. keyID = [frame number, key of the bad point].
    #NOTE(review): only the average is returned; CorrectedData is built but
    #never used. The `horizontal` branches are currently identical - the
    #vertical averaging is only implemented in "quiver plotting.ipynb".
    m = keyID[0]
    start = keyID[1]
    #prep the data and make a copy so you don't overwrite the original
    CorrectedData = Data.copy(deep = True)
    CorrectedData = CorrectedData.sort_values(['Y','X'], ascending=[True,True])
    Data = Data.sort_values(['Y','X'], ascending=[True,True])
    #average the values either side of the flagged key (both branches do the
    #horizontal average; see NOTE above)
    if horizontal == True:
        avg = (Data.iloc[start-1][(str(m))] + Data.iloc[start+1][(str(m))])/2
    else:
        avg = (Data.iloc[start-1][(str(m))] + Data.iloc[start+1][(str(m))])/2
    #put the data back in the correct shape
    CorrectedData = CorrectedData.sort_index()
    Data = Data.sort_index()
    return avg
def simpleFixes(Data, inkeys, maxdiff=10, horizontal = True):
    '''
    Patch isolated bad director angles flagged in inkeys, replacing each one
    with the average of its two neighbours (via averageToCorrectSingleKey).

    Parameters
    ----------
    Data : DataFrame with 'X' and 'Y' columns plus per-frame angle columns
        named str(frame). Not modified (a deep copy is corrected).
    inkeys : 3-D key table; inkeys[i][j][k] is a row index of a flagged
        angle in frame i, with 0 meaning "no key".
    maxdiff : threshold compared against the angle difference between the
        point's two neighbours. NOTE: the comparison below is deliberately
        inverted (see the comment on it) - do not change it without
        re-checking results.
    horizontal : passed through to averageToCorrectSingleKey; only the
        horizontal case is handled here (see in-code comment).

    Returns
    -------
    CorrectedData : corrected copy of Data, original row order restored.
    '''
    #prep the data and make a copy so you don't overwrite the original
    CorrectedData = Data.copy(deep = True)
    CorrectedData = CorrectedData.sort_values(['Y','X'], ascending=[True,True])
    Data = Data.sort_values(['Y','X'], ascending=[True,True])
    # fudge factors: p offsets the one-element write slice, q offsets the
    # neighbour lookups relative to the stored key
    p = 0
    q = 1
    #go into each frame
    for i in range(0,len(inkeys)):
        #go into each row of inkeys
        for j in range(0,len(inkeys[0])):
            #loop over the elements of a row of inkeys
            for k in range(0,len(inkeys[0][0])):
                # 0 means "no key"; the 407 bound keeps the +q+1 neighbour
                # lookup inside the 408-point grid
                if inkeys[i][j][k] != 0 and int(inkeys[i][j][k])+q+1<407:
                    #check if the director angles either side of the instance are sufficiently close such that we can assume
                    #this is a single point which has gone wrong.
                    #This only works for the horizontal case, the vertical case is not implemented here but it is in the jupyter
                    #notebook "quiver plotting.ipynb"
                    diff = abs(Data.iloc[int(inkeys[i][j][k]+q-1)][(str(i))] - Data.iloc[int(inkeys[i][j][k]+q+1)][(str(i))])
                    #the comparator below should logically be a < but the author got it to work like this
                    #and deliberately left it; do not "fix" without re-validating the output
                    if diff > maxdiff:
                        #use function to correct based on average of neighbours
                        #(chained positional slice assignment writes one element of the frame column)
                        CorrectedData.loc[:,(str(i))][int(inkeys[i][j][k])+p:int(inkeys[i][j][k])+p+1] = \
                        averageToCorrectSingleKey(Data, [i,int(inkeys[i][j][k])], horizontal = horizontal)
    #put the data back in the correct shape
    CorrectedData = CorrectedData.sort_index()
    Data = Data.sort_index()
    return CorrectedData
# def averageToCorrectSingleKey(Data, keyID, horizontal = True):
# #function to manually rotate one or a sequence of values by the same amount
# #m is the frame number,start/stop are the keys to start/stop the change between
# m = keyID[0]
# start = keyID[1]
# #prep the data and make a copy so you don't overwrite the original
# CorrectedData = Data.copy(deep = True)
# CorrectedData = CorrectedData.sort_values(['Y','X'], ascending=[True,True])
# Data = Data.sort_values(['Y','X'], ascending=[True,True])
# #do the averaging based on values either side of the special key
# if horizontal == True:
# avg = (Data.iloc[start-1][(str(m))] + Data.iloc[start+1][(str(m))])/2
# else:
# avg = (Data.iloc[start-51][(str(m))] + Data.iloc[start+51][(str(m))])/2
# #put the data back in the correct shape
# CorrectedData = CorrectedData.sort_index()
# Data = Data.sort_index()
# return avg
# def simpleFixes(Data, inkeys, maxdiff=10, horizontal = True):
# #prep the data and make a copy so you don't overwrite the original
# CorrectedData = Data.copy(deep = True)
# CorrectedData = CorrectedData.sort_values(['Y','X'], ascending=[True,True])
# Data = Data.sort_values(['Y','X'], ascending=[True,True])
# #fudge factors to get indices correct
# p = 0
# q = 1
# #go into each frame
# for i in range(0,len(inkeys)):
# #go into each row of inkeys
# for j in range(0,len(inkeys[0])):
# #go loop over the elements of a row of inkeys
# for k in range(0,len(inkeys[0][0])):
# if inkeys[i][j][k] != 0 and int(inkeys[i][j][k])+q+1<407:
# #check if the director angles either side of the instance are sufficiently close such that we can assume
# #this is a single point which has gone wrong
# if horizontal == True:
# diff = abs(Data.iloc[int(inkeys[i][j][k]+q-1)][(str(i))] -
# Data.iloc[int(inkeys[i][j][k]+q+1)][(str(i))])
# else:
# diff = abs(Data.iloc[int(inkeys[i][j][k]+q-51)][(str(i))] -
# Data.iloc[int(inkeys[i][j][k]+q+51)][(str(i))])
# if diff < maxdiff:
# #use function to correct based on average of neighbours
# CorrectedData.loc[:,(str(i))][int(inkeys[i][j][k])+p:int(inkeys[i][j][k])+p+1] = \
# averageToCorrectSingleKey(Data, [i,int(inkeys[i][j][k])], horizontal = horizontal)
# #put the data back in the correct shape
# CorrectedData = CorrectedData.sort_index()
# Data = Data.sort_index()
# return CorrectedData
# def AnalyseEachStep (cycle_number, step_number, photo_loc):
# '''
# A seemingly pointless function which opens images as frames using PIMS but then does nothing except return an empty array - output.
# '''
# frames = pims.ImageSequence(photo_loc + 'A000' + str(cycle_number) + step_number + '*.jpg', process_func = cropngrey)
# output = np.zeros(((5),1))
# return output