I'm not a programmer, but I enjoy the bit of coding that I do. This page includes a few snippets of code that I'm proud of, or particularly enjoyed making.
Sometimes linking physical and digital systems is hard. The project that I'm most proud of required linking a camera to a 3D printer. Most of the details are included in the paper, except for the code. I didn't include the code because much of it is specific to a unique hardware configuration. In hindsight, I think there are some tidbits of ImageJ code that could be helpful to someone.
Expand to see the full code
//Laser Beam Tracking for in-situ Microscope Videos
//Rev: 2 November 2021
//Doug Sassaman
//This code does the following:
//Interacts with user to select a position in the microscope video which has known machine-space coordinates (xml)
//Parses the xml file to return the scan extents
//Adjusts the xml file used to create the scan based on the user-selected known position in the microscope video
//Traces the laser beam during a scanning operation on the in situ microscope videos
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//Housekeeping:
//Reset ImageJ state so stale ROIs, open images, results, and log text from a
//previous run cannot leak into this analysis.
if (isOpen("ROI Manager")) {
selectWindow("ROI Manager");
roiManager("Select All");
roiManager("Delete");
run("Close");
}
run("Close All"); //close any open image windows
run("Clear Results"); //empty the Results table
print("\\Clear"); //clear the Log window
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//File I/O
//User Input: the scan-path XML file plus two in-situ image sequences —
//one where the beam is scanning, and one where it parks at a known location.
xmlFile=File.openDialog("Choose an XML file:");
movingBeamVideo=File.openDialog("Choose the moving beam image sequence:");
stationaryBeamVideo=File.openDialog("Choose the stationary beam image sequence:");
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////DEFINE STATIC VARIABLES////
// Video Parameters //
//Open the moving-beam sequence once just to read its pixel dimensions.
open(movingBeamVideo); getDimensions(width, height, channels, slices, frames);
imageWidth = width; imageHeight = height; run("Close All");
fileNameInfo = fileNameParser(movingBeamVideo); //this is a function, see below; returns [zoom fraction, scan speed]
print("Zoom:",fileNameInfo[0],"Scan speed:",fileNameInfo[1]);
zoomPercent = fileNameInfo[0];
//Convert zoom to a pixel size (um/px). The constants (3.90, 0.57, 1.70,
//12.00) are presumably optics-calibration values for this specific
//microscope (field-of-view extremes in mm and zoom dial range) — TODO confirm.
micronPerPX_x = parseFloat(((3.90 - (((3.90-0.57)*(1.70-(((12.00-1.7)*zoomPercent)+1.7)))/(1.70-12.00)))/imageWidth)*1000);
micronPerPX_y = parseFloat(((3.90 - (((3.90-0.57)*(1.70-(((12.00-1.7)*zoomPercent)+1.7)))/(1.70-12.00)))/imageHeight)*1000);
var fps = 4000; var secPerFrame = 1/fps; micosecPerFrame = secPerFrame*(1000000); //camera frame period in microseconds (250 us at 4000 fps)
print("micronPerPX_x", micronPerPX_x);
print("micosecPerFrame",micosecPerFrame);
// Known Laser Scan Location [in mm] //
knownLoc = newArray(5.5,6.7); //all videos EXCEPT nylon 1500mm/s 60%power 50% zoom
// Laser Parameters//
scanSpeed = fileNameInfo[1]; //in mm/s, parsed from the file name
var hatchSpacingMicron = 275; hatchSpacingPx = parseInt(hatchSpacingMicron/micronPerPX_x); //line-to-line spacing
var beamDiamMicron = 600; beamDiamPx = parseInt(beamDiamMicron/micronPerPX_x); //diameter of the drawn tracking circle
pxPerMicrosec = (scanSpeed/1000)/micronPerPX_x; // distance in px beam travels per microsecond
//Laser Delays (in [us]):
startDelay = 0; // in number of frames (NOT MICROSECONDS)
markDelay = 1500;
jumpDelay = 700;
jumpTime = (hatchSpacingMicron*1000000)/(scanSpeed*1000); //converted to microsec; assumes the jump runs at scanSpeed — TODO confirm
frameDelay = Math.floor((markDelay + jumpDelay + jumpTime)/micosecPerFrame)+0; //total turnaround time expressed in whole frames
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//Open an image where the laser has scanned a stationary KNOWN position (known in XML coordinates)
//xmlShifter returns the XML scan extents translated into video pixel space as
//[bottomLeftX,bottomLeftY, bottomRightX,bottomRightY, topLeftX,topLeftY, topRightX,topRightY]
shiftedCoords = xmlShifter(stationaryBeamVideo,xmlFile, knownLoc,micronPerPX_x,micronPerPX_y);
Array.print(shiftedCoords);
//Open moving beam image sequence
run("Image Sequence...", "open="+movingBeamVideo+" sort");
run("Enhance Contrast", "saturated=0.35");
waitForUser( "Pause","set animation speed then click okay (image->stacks->animation");
getDimensions(width, height, channels, slices, frames);
print("number of slices:",slices);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//Initialize dynamic variables and indices:
//frameIdx starts at 3 — presumably an empirically determined offset between
//the start of the video and the first laser motion; TODO confirm per camera trigger setup.
scanLineNumber = 1; time=0; y=shiftedCoords[1]; xDirection = 1; frameIdx=3;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//Run main loop:
//Simulate the beam position in 1-microsecond steps. The beam sweeps in x,
//reverses and drops one hatch line when it passes the shifted XML extents,
//and every micosecPerFrame steps the position is sampled and a circle is
//drawn on the matching video frame.
x=shiftedCoords[0];
while (time < (micosecPerFrame*slices) && frameIdx < 160) //time in microsec; 160 caps how many frames get annotated — TODO confirm for longer videos
{
print("time:",time,"x:",x,"y:",y,"line",scanLineNumber, "frame:",frameIdx);
time = time + 1;
x+=(pxPerMicrosec*xDirection);
if (x>shiftedCoords[2] || x<shiftedCoords[0]) //past the right (bottomRight x) or left (bottomLeft x) extent
{
xDirection = xDirection*(-1); //change direction
frameIdx+=frameDelay; //initiate mark and jump delay (one of each)
y-=hatchSpacingPx; //jump to next line
scanLineNumber+=1; //index scan line number
}
if (time % micosecPerFrame == 0) //sample the continuous time by frame rate (every 250us at current settings)
{
//draw circle here!
drawCircle(x,y,frameIdx,beamDiamPx);
frameIdx+=1;
print("frameIdx", frameIdx);
}
}//end while loop
//FUNCTIONS
function fileNameParser(video) {
// Parse scan parameters out of the video file path.
// Expects an underscore-delimited file name whose tokens include the zoom
// (e.g. "50Zoom" -> 0.50) and the scan speed (e.g. "1500mms" -> 1500).
// Returns newArray(zoom fraction, scan speed in mm/s).
// NOTE(review): if either token is missing, zoom/scanSpeed are never
// assigned and the macro aborts at the return — confirm the file-naming convention.
// Split the file name to determine scan speed and zoom
fileName = split(video,"/"); //path components; last element is the file name
splitFileName = split(fileName[fileName.length-1],"_");
// Array.print(splitFileName);
for (i = 0; i < splitFileName.length; i++)
{
// print(splitFileName[i]);
if (matches(splitFileName[i], "(^.*Zoom.*$)")) //token containing "Zoom"
{
zoom = split(splitFileName[i],"(\d+|\\D+)"); //split the digits off the token
zoom = parseInt(zoom[0])/100; //percent -> fraction
// print(zoom);
}
if (matches(splitFileName[i], "(^.*mms.*$)")) //token containing "mms"
{
scanSpeed = split(splitFileName[i],"(\d+|\\D+)");
scanSpeed = parseInt(scanSpeed[0]); //mm/s
// print(scanSpeed);
}
}
return newArray(zoom,scanSpeed);
}
function xmlShifter(imageFile,xmlFile, knownLocation,
micronToPXwidth,micronToPXheight) {
//This function will output the shifted extents of XML file
//bottomLeft,bottomRight,topLeft,topRight in pixels
//Parameters:
//  imageFile       - image sequence where the beam parks at a known spot
//  xmlFile         - scan-path XML with coordinates in mm
//  knownLocation   - known machine-space position [x,y] in mm
//  micronToPXwidth/height - pixel size in um/px for each axis
//The user marks the parked beam with an oval; the offset between that
//centroid and the known location registers XML space to video pixel space.
//Open the image sequence and scale according to microscope magnification
run("Image Sequence...", "open="+imageFile+" sort use");
run("Enhance Contrast", "saturated=0.35");
// setLocation(1000, -1050, 1280, 1024);
//Draw a circle around the laser at the end of its last vector
run("Set Measurements...", "centroid redirect=None decimal=3"); setTool("oval");
Dialog.createNonBlocking("Pause the animation"); Dialog.addMessage("After you click YES here, the animation will begin\n Pause the animation, and then draw an oval around the laser at the end of it's last vector"); Dialog.show();
run("Start Animation");
waitForUser( "Pause","click OK once your oval has been drawn");
run("Measure"); xCircle = getResult("X", 0); yCircle = getResult("Y", 0); //x & y are in um (but with the default imageJ origin)
run("Close All");
// Scale known coords from mm into pixels
xScaled = 1000*(knownLocation[0]/micronToPXwidth); yScaled = 1000*(knownLocation[1]/micronToPXheight);
print("x scaled:", xScaled, "y scaled",yScaled);
//Calculate translation amount (negative: shift toward the measured centroid)
shiftLeft = -1*Math.round((xScaled-xCircle)); shiftUp = -1*Math.round((yScaled-yCircle));
print("shift left", shiftLeft,"shift up", shiftUp);
// Shift the xml extents
start = xmlParser(xmlFile,"first");
max = xmlParser(xmlFile,"max");
min = xmlParser(xmlFile,"min");
end = xmlParser(xmlFile,"end");
//NOTE(review): shiftLeft is added outside Math.round for x but shiftUp sits
//inside the round for y. Since both shifts are already integers the results
//are identical, but the asymmetry is worth normalizing at some point.
bottomLeft = newArray(Math.round((start[0]*1000)/micronToPXwidth)+shiftLeft,Math.round((start[1]*1000)/micronToPXheight+shiftUp));
bottomRight = newArray(Math.round((max[0]*1000)/micronToPXwidth)+shiftLeft,Math.round((max[1]*1000)/micronToPXheight+shiftUp));
topLeft = newArray(Math.round((min[0]*1000)/micronToPXwidth)+shiftLeft,Math.round((min[1]*1000)/micronToPXheight+shiftUp));
topRight = newArray(Math.round((end[0]*1000)/micronToPXwidth)+shiftLeft, Math.round((end[1]*1000)/micronToPXheight+shiftUp));
print("top right:");
Array.print(topRight);
//Flattened as [blX,blY, brX,brY, tlX,tlY, trX,trY]
return Array.concat(bottomLeft,bottomRight,topLeft,topRight);
}
function xmlParser(file, position) {
    //Extract a coordinate pair (in XML/mm space) from the scan-path XML file.
    //  position = "first" -> start point of the first vector
    //  position = "max"   -> maximum x extent paired with maximum y
    //  position = "min"   -> minimum x extent paired with minimum y
    //  position = "end"   -> end coords of the last vector (max x, min y)
    //y values are negated so the axis direction matches ImageJ's top-left origin.
    filestring=File.openAsString(file);
    rows=split(filestring, "\n\n"); //XML skips lines, leave these out, split on new row
    x = newArray(rows.length);
    y = newArray(rows.length);
    for(i=0; i<rows.length; i++)
    {
        columns=split(rows[i],"'"); //this weeds out the scan speed and power commands
        if(columns.length==1) //This selects only the jump or mark commands
        {
            segments = split(columns[0],">,<");
            x[i] = parseFloat(segments[1]); //x xml coord
            y[i] = parseFloat(segments[2])*(-1); //y xml coord, negative value is to get coord direction to match imageJ
        }
        else {
            x[i]=""; y[i]=""; //placeholder for non-vector rows, removed below
        }
    }
    x = Array.deleteValue(x, ""); y = Array.deleteValue(y, "");
    Array.getStatistics(y, min, max);
    yMin=min; yMax=max;
    xMinima = Array.findMinima(x, 0.1);
    xMin = x[xMinima[1]]; //NOTE(review): index 1 (second-ranked minimum), not 0 — confirm intentional
    xMaxima = Array.findMaxima(x, 0.1);
    xMax = x[xMaxima[1]]; //NOTE(review): same — index 1, not 0
    if (position=="first") {
        //BUGFIX: the original returned x[position]/y[position], indexing the
        //arrays with the string "first". Per the contract above, the first
        //vector's coordinates live at index 0.
        return newArray(x[0],y[0]);
    } else if (position=="max") {
        return newArray(xMax,yMax);
    } else if (position=="end") {
        return newArray(xMax,yMin);
    } else { //"min" (default)
        return newArray(xMin,yMin);
    }
}
function drawCircle(xCurr,yCurr,sliceCurr,diam) {
// draw an oval at the specified location in time (slice) and space (x,y)
// Burns a circle of diameter `diam` px, centered at (xCurr,yCurr), into
// slice `sliceCurr` of the current stack using the foreground color.
// selectWindow("Rotated");
setSlice(sliceCurr);
run("Specify...", "width="+diam+" height="+diam+" x="+xCurr+" y="+yCurr+" slice="+sliceCurr+" oval constrain centered");
run("Draw", "slice");
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//References
//https://imagej.nih.gov/ij/macros/examples/PlotSigmoidDerivatives.txt <- nested functions
Something that might be useful to people is the circle drawing function. This can be called in a loop to draw shapes on each image in a stack or in a video.
function drawCircle(xCurr,yCurr,sliceCurr,diam) {
// draw an oval at the specified location in time (slice) and space (x,y)
// (Stand-alone copy of the function from the full listing above, shown here
// because it is reusable: call it in a loop to annotate each stack slice.)
setSlice(sliceCurr);
run("Specify...", "width="+diam+" height="+diam+" x="+xCurr+" y="+yCurr+" slice="+sliceCurr+" oval constrain centered");
run("Draw", "slice");
}
P.S. ImageJ has a hard time with .mp4 videos. I typically use ffmpeg to convert to .avi format. The command below works for videos from my Android phone.
ffmpeg -i video.mp4 -pix_fmt nv12 -f avi -vcodec rawvideo converted_video.avi
We made an interactive design tool, which will be described in a forthcoming paper. The bits of code that I was responsible for allowed us to convert 2D black & white images into 3D-printable files. In our case, the 2D images were MATLAB arrays, but the code can be applied to other formats with slight modification.
Expand for full code
# I/O
import scipy.io
import numpy as np
from google.colab import output
from stl import mesh
from skimage import measure
import matplotlib.pyplot as plt
matlabDictionary = scipy.io.loadmat(path + filename)  # NOTE(review): `path` and `filename` are defined in an earlier (unshown) cell — confirm
data = matlabDictionary['array']  # 2D numeric array exported from MATLAB under the key 'array'
filteredData = np.round(data) #Convert to a binary image
# Take a look at the image
x = np.linspace(0,data.shape[0],data.shape[0])  # NOTE(review): linspace(0, N, N) gives spacing N/(N-1); np.arange(N) may be intended
y = np.linspace(0,data.shape[1],data.shape[1])
xx, yy = np.meshgrid(x,y)
cMap = 'Greys'
plt.figure()
plt.pcolormesh(xx,yy,np.transpose(data),cmap=cMap)  # transpose so array rows map onto the y axis of the plot
# Convert to 3D using the extrude method (use this OR the revolve method below, not both)
# Stack the 2D image along a new depth axis to produce a square extrusion.
threeD = np.repeat(data[:, :, np.newaxis], data.shape[1], axis=2)
# BUGFIX: the original wrote to `threeD_grey`, which is never defined (a
# leftover name from an earlier revision) and would raise NameError; the
# capping must be applied to `threeD` itself.
threeD[:,:,-1] = 0 # Close off front and back of chair
threeD[:,:,0] = 0 # Close off front and back of chair
# Convert to 3D using the revolve method (use this OR the extrude method above, not both)
def revolve_2d_to_3d(array2d):
    """Revolve a 2D profile into a 3D voxel volume.

    The input is transposed, then every point is swept through a full
    360-degree rotation of its row index about the central row. Rotated
    positions land in a (rows, cols, cols) array of voxel values.

    Raises ValueError if the (transposed) input is not 2D.
    """
    profile = array2d.transpose()
    if profile.ndim != 2:
        raise ValueError("Input should be a 2D array")
    rows, cols = profile.shape
    volume = np.zeros((rows, cols, cols))
    center = rows // 2
    # Hoist the trig out of the per-point loop: one table of angles,
    # cosines, and sines for all 360 integer degrees.
    angles = np.radians(np.arange(360))
    cos_t = np.cos(angles)
    sin_t = np.sin(angles)
    for i in range(rows):
        offset = i - center  # signed distance of this row from the rotation axis
        for j in range(cols):
            v = profile[i, j]
            for t in range(360):
                z = int(center + offset * cos_t[t])
                y = int(center + offset * sin_t[t])
                # Guard against rotated indices leaving the volume.
                if 0 <= z < rows and 0 <= y < cols:
                    volume[z, j, y] = v
    return volume
threeD = revolve_2d_to_3d(data)
# Use scikit image to do all of the fancy edge detection and turn it into a volume (https://youtu.be/HNJduYogxh4)
verts, faces, normals, values = measure.marching_cubes(np.transpose(threeD))
# Convert the faces into a mesh and then stl
obj_3d = mesh.Mesh(np.zeros(faces.shape[0],dtype=mesh.Mesh.dtype))
# BUGFIX: the original filled and saved `obj_3d_g`, an undefined name left
# over from an earlier revision; the mesh created above is `obj_3d`.
for i, f in enumerate(faces):
    obj_3d.vectors[i] = verts[f]  # each face is a triangle: 3 vertex indices into verts
obj_3d.save('part.stl')
I also made some functions so that the 3D designs could be rendered in a Jupyter Dash app:
Expand for full code
# Create a Dash application
# NOTE(review): JupyterDash, go (plotly.graph_objects), html, dcc, and the
# verts/faces arrays from marching_cubes must come from earlier (unshown)
# cells — confirm the imports before running this snippet standalone.
app = JupyterDash(__name__)
# Define the 3D surface plot
# NOTE(review): `mesh` shadows the `stl.mesh` module name used in the
# previous snippet; rename one of them if both run in the same session.
mesh = go.Mesh3d(
x=verts[:, 0],
y=verts[:, 1],
z=verts[:, 2],
i=faces[:, 0],
j=faces[:, 1],
k=faces[:, 2],
color='red',
opacity=1.0,
)
# Alternative coloring options, kept for reference:
# colorscale='Viridis'
# intensity=values,
# Define layout for the 3D plot: hide all axes and set the initial camera eye
layout = go.Layout(
margin=dict(t=0, b=0, l=0, r=0),
scene=dict(
xaxis = dict(visible=False),
yaxis = dict(visible=False),
zaxis =dict(visible=False),
camera=dict(
eye=dict(x=1.6, y=1.1, z=1.0)
)
)
)
data = [mesh]
plot_figure = go.Figure(data=data, layout=layout)
# Define the layout: a single full-width interactive 3D graph
app.layout = html.Div([
dcc.Graph(figure=plot_figure)
])
# Run the app inline in the notebook output cell
app.run_server(mode='inline')