Skip to content

Commit

Permalink
Merge branch 'master' into master
Browse files Browse the repository at this point in the history
  • Loading branch information
harripd authored May 19, 2024
2 parents 33e42c7 + 1290c6b commit 7ed3981
Show file tree
Hide file tree
Showing 5 changed files with 742 additions and 411 deletions.
123 changes: 123 additions & 0 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
---
# CI test workflow: builds phconvert in a conda environment across
# OS / Python combinations, downloads the sample data files, and runs pytest.
name: Tests

on:
  push:
    # NOTE: the key must be `branches` (plural); `branch` is silently ignored
    # by GitHub Actions and the filter would not apply.
    branches:
      - master
      - setuptools_scm
  pull_request:
    branches:
      - master

jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-13, macos-14]
        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
        exclude:
          # No Python 3.7 build is available for Apple Silicon (macos-14).
          - os: macos-14
            python-version: "3.7"
    steps:
      - uses: actions/checkout@v4
        with:
          persist-credentials: false
          # Full history so the SCM-based version tooling can derive a version.
          fetch-depth: 0
      - name: Install Conda
        uses: conda-incubator/setup-miniconda@v3
        with:
          auto-update-conda: true
          channels: conda-forge
          python-version: ${{ matrix.python-version }}
          activate-environment: test
      - name: Apple Silicon oddities
        if: matrix.os == 'macos-14'
        # Write to $GITHUB_ENV so the variables persist into later steps;
        # a plain `export` only lasts for this step's shell.
        run: |
          brew install c-blosc hdf5
          echo "HDF5_DIR=/opt/homebrew/opt/hdf5" >> "$GITHUB_ENV"
          echo "BLOSC_DIR=/opt/homebrew/opt/c-blosc" >> "$GITHUB_ENV"
      - name: Test conda installation
        shell: bash -l {0}
        run: conda info
      - name: Install Dependencies
        shell: bash -l {0}
        run: |
          python -m pip install --upgrade pip
          python -m pip install setuptools build
          conda install numpy cython pytest
          conda install pytables matplotlib pandas numba
      - name: Build phconvert
        shell: bash -l {0}
        run: |
          pip install .
      - name: Download files (Unix)
        shell: bash -l {0}
        if: runner.os != 'Windows'
        run: |
          cd notebooks/data
          wget https://ndownloader.figshare.com/files/3350666 -O dsdna_d7d17_50_50_1.set
          wget https://ndownloader.figshare.com/files/3350669 -O dsdna_d7d17_50_50_1.spc
          wget https://ndownloader.figshare.com/files/3350672 -O Pre.ht3
          wget https://ndownloader.figshare.com/files/3350663 -O 0023uLRpitc_NTP_20dT_0.5GndCl.sm
          wget https://github.com/Photon-HDF5/phconvert/files/231343/Cy3.Cy5_diff_PIE-FRET.ptu.zip
          unzip Cy3.Cy5_diff_PIE-FRET.ptu.zip
          wget https://ndownloader.figshare.com/files/6955091 -O 161128_DM1_50pM_pH74.ptu
          wget https://ndownloader.figshare.com/files/14828594 -O 20161027_DM1_1nM_pH7_20MHz1.ptu
          wget https://ndownloader.figshare.com/files/13675271 -O TestFile_2.ptu
          wget https://ndownloader.figshare.com/files/14850533 -O trace_T2_300s_1_coincidence.ptu
          wget https://ndownloader.figshare.com/files/14890535 -O nanodiamant_histo.phu
          wget https://github.com/dwaithe/FCS_point_correlator/raw/master/focuspoint/topfluorPE_2_1_1_1.pt3
          wget https://github.com/Photon-HDF5/phconvert/files/1380341/DNA_FRET_0.5nM.pt3.zip
          unzip DNA_FRET_0.5nM.pt3.zip
          wget https://github.com/Photon-HDF5/phconvert/files/1336330/data.zip
          unzip data.zip
          cd ../..
      - name: Download files (Windows)
        shell: bash -l {0}
        if: runner.os == 'Windows'
        run: |
          cd notebooks/data
          curl.exe -L --output Cy3.Cy5_diff_PIE-FRET.ptu.zip --url https://github.com/Photon-HDF5/phconvert/files/231343/Cy3.Cy5_diff_PIE-FRET.ptu.zip
          7z e Cy3.Cy5_diff_PIE-FRET.ptu.zip
          curl.exe -L --output dsdna_d7d17_50_50_1.set --url https://ndownloader.figshare.com/files/3350666
          curl.exe -L --output dsdna_d7d17_50_50_1.spc --url https://ndownloader.figshare.com/files/3350669
          curl.exe -L --output Pre.ht3 --url https://ndownloader.figshare.com/files/3350672
          curl.exe -L --output 0023uLRpitc_NTP_20dT_0.5GndCl.sm --url https://ndownloader.figshare.com/files/3350663
          curl.exe -L --output 161128_DM1_50pM_pH74.ptu --url https://ndownloader.figshare.com/files/6955091
          curl.exe -L --output 20161027_DM1_1nM_pH7_20MHz1.ptu --url https://ndownloader.figshare.com/files/14828594
          curl.exe -L --output TestFile_2.ptu --url https://ndownloader.figshare.com/files/13675271
          curl.exe -L --output trace_T2_300s_1_coincidence.ptu --url https://ndownloader.figshare.com/files/14850533
          curl.exe -L --output nanodiamant_histo.phu --url https://ndownloader.figshare.com/files/14890535
          curl.exe -L --output topfluorPE_2_1_1_1.pt3 --url https://github.com/dwaithe/FCS_point_correlator/raw/master/focuspoint/topfluorPE_2_1_1_1.pt3
          curl.exe -L --output DNA_FRET_0.5nM.pt3.zip --url https://github.com/Photon-HDF5/phconvert/files/1380341/DNA_FRET_0.5nM.pt3.zip
          7z e DNA_FRET_0.5nM.pt3.zip
          cd ../..
      - name: Test project
        shell: bash -l {0}
        run: |
          pytest
138 changes: 69 additions & 69 deletions phconvert/bhreader.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,75 +92,75 @@ def load_spc(fname, spc_model='SPC-630'):
(timestamps_unit).
"""

f = open(fname, 'rb')

if ('630' in spc_model) or ('600' in spc_model):

# We first decode the first 6 bytes which is a header...
header = np.fromfile(f, dtype='u2', count=3)
timestamps_unit = header[1] * 0.1e-9
num_routing_bits = np.bitwise_and(header[0], 0x000F) # unused

# ...and then the remaining records containing the photon data
spc_dtype = np.dtype([('field0', '<u2'), ('b', '<u1'), ('c', '<u1'),
('a', '<u2')])
data = np.fromfile(f, dtype=spc_dtype)

nanotime = 4095 - np.bitwise_and(data['field0'], 0x0FFF)
detector = data['c']

# Build the macrotime (timestamps) using in-place operation for efficiency
timestamps = data['b'].astype('int64')
np.left_shift(timestamps, 16, out=timestamps)
timestamps += data['a']

# extract the 13-th bit from data['field0']
overflow = np.bitwise_and(np.right_shift(data['field0'], 13), 1)
overflow = np.cumsum(overflow, dtype='int64')

# Add the overflow bits
timestamps += np.left_shift(overflow, 24)

elif ('SPC-1' in spc_model) or ('SPC-830' in spc_model):
# We first decode the first 4 bytes which is a header...
header = np.fromfile(f, dtype='u4', count=1)[0]
timestamps_unit = np.bitwise_and(header, 0x00FFFFFF) * 0.1e-9
num_routing_bits = np.bitwise_and(np.right_shift(header, 32), 0x78) # unused

# ...and then the remaining records containing the photon data
spc_dtype = np.dtype([('field0', '<u2'), ('field1', '<u2')])
data = np.fromfile(f, dtype=spc_dtype)

nanotime = 4095 - np.bitwise_and(data['field1'], 0x0FFF)
detector = np.bitwise_and(np.right_shift(data['field0'], 12), 0x0F)

# Build the macrotime
timestamps = np.bitwise_and(data['field0'], 0x0FFF).astype(dtype='int64')

# Extract the various status bits
mark = np.bitwise_and(np.right_shift(data['field1'], 12), 0x01)
gap = np.bitwise_and(np.right_shift(data['field1'], 13), 0x01)
overflow = np.bitwise_and(np.right_shift(data['field1'], 14), 0x01).\
astype(dtype='int64')
invalid = np.bitwise_and(np.right_shift(data['field1'], 15), 0x01)

# Invalid bytes: number of overflows from the last detected photon
for i_ovf in np.nonzero(overflow)[0].tolist():
if invalid[i_ovf]:
overflow[i_ovf] = np.left_shift(np.bitwise_and(
data['field1'][i_ovf], 0x0FFF), 16)\
+ data['field0'][i_ovf]

# Each overflow occurs every 2^12 macrotimes
overflow = np.left_shift(np.cumsum(overflow), 12)

# Add the overflow bits
timestamps += overflow

# Delete invalid entries
nanotime = np.delete(nanotime, invalid.nonzero())
timestamps = np.delete(timestamps, invalid.nonzero())
detector = np.delete(detector, invalid.nonzero())
with open(fname, 'rb') as f:

if ('630' in spc_model) or ('600' in spc_model):
# We first decode the first 6 bytes which is a header...
header = np.fromfile(f, dtype='u2', count=3)
timestamps_unit = header[1] * 0.1e-9
num_routing_bits = np.bitwise_and(header[0], 0x000F) # unused
# ...and then the remaining records containing the photon data
spc_dtype = np.dtype([('field0', '<u2'), ('b', '<u1'), ('c', '<u1'),
('a', '<u2')])
data = np.fromfile(f, dtype=spc_dtype)
nanotime = 4095 - np.bitwise_and(data['field0'], 0x0FFF)
detector = data['c']
# Build the macrotime (timestamps) using in-place operation for efficiency
timestamps = data['b'].astype('int64')
np.left_shift(timestamps, 16, out=timestamps)
timestamps += data['a']
# extract the 13-th bit from data['field0']
overflow = np.bitwise_and(np.right_shift(data['field0'], 13), 1)
overflow = np.cumsum(overflow, dtype='int64')
# Add the overflow bits
timestamps += np.left_shift(overflow, 24)
elif ('SPC-1' in spc_model) or ('SPC-830' in spc_model):
# We first decode the first 4 bytes which is a header...
header = np.fromfile(f, dtype='u4', count=1)[0]
timestamps_unit = np.bitwise_and(header, 0x00FFFFFF) * 0.1e-9
num_routing_bits = np.bitwise_and(np.right_shift(header, 32), 0x78) # unused
# ...and then the remaining records containing the photon data
spc_dtype = np.dtype([('field0', '<u2'), ('field1', '<u2')])
data = np.fromfile(f, dtype=spc_dtype)
nanotime = 4095 - np.bitwise_and(data['field1'], 0x0FFF)
detector = np.bitwise_and(np.right_shift(data['field0'], 12), 0x0F)
# Build the macrotime
timestamps = np.bitwise_and(data['field0'], 0x0FFF).astype(dtype='int64')
# Extract the various status bits
mark = np.bitwise_and(np.right_shift(data['field1'], 12), 0x01)
gap = np.bitwise_and(np.right_shift(data['field1'], 13), 0x01)
overflow = np.bitwise_and(np.right_shift(data['field1'], 14), 0x01).\
astype(dtype='int64')
invalid = np.bitwise_and(np.right_shift(data['field1'], 15), 0x01)
# Invalid bytes: number of overflows from the last detected photon
for i_ovf in np.nonzero(overflow)[0].tolist():
if invalid[i_ovf]:
overflow[i_ovf] = np.left_shift(np.bitwise_and(
data['field1'][i_ovf], 0x0FFF), 16)\
+ data['field0'][i_ovf]
# Each overflow occurs every 2^12 macrotimes
overflow = np.left_shift(np.cumsum(overflow), 12)
# Add the overflow bits
timestamps += overflow
# Delete invalid entries
nanotime = np.delete(nanotime, invalid.nonzero())
timestamps = np.delete(timestamps, invalid.nonzero())
detector = np.delete(detector, invalid.nonzero())

return timestamps, detector, nanotime, timestamps_unit

Expand Down
4 changes: 3 additions & 1 deletion phconvert/pqreader.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,8 @@
rtTimeHarp260NT2 = 0x00010205, # (SubID = $00 ,RecFmt: $01) (V1), T-Mode: $02 (T2), HW: $05 (TimeHarp260N)
rtTimeHarp260PT3 = 0x00010306, # (SubID = $00 ,RecFmt: $01) (V1), T-Mode: $03 (T3), HW: $06 (TimeHarp260P)
rtTimeHarp260PT2 = 0x00010206, # (SubID = $00 ,RecFmt: $01) (V1), T-Mode: $02 (T2), HW: $06 (TimeHarp260P)
rtMultiHarpNT3 = 0x00010307, # (SubID = $00 ,RecFmt: $01) (V1), T-Mode: $02 (T3), HW: $07 (MultiHarp150N)
rtMultiHarpNT2 = 0x00010207, # (SubID = $00 ,RecFmt: $01) (V1), T-Mode: $02 (T2), HW: $07 (MultiHarp150N))
)

# Reverse mappings
Expand Down Expand Up @@ -120,7 +122,7 @@ def load_ptu(filename, ovcfunc=None):
detectors, timestamps, nanotimes = process_t3records(
t3records, time_bit=10, dtime_bit=15, ch_bit=6, special_bit=True,
ovcfunc=_correct_overflow_nsync)
elif record_type in ('rtHydraHarp2T2', 'rtTimeHarp260NT2','rtTimeHarp260PT2'):
elif record_type in ('rtHydraHarp2T2', 'rtTimeHarp260NT2','rtTimeHarp260PT2', 'rtMultiHarpNT3'):
detectors, timestamps = process_t2records(t3records,
time_bit=25, ch_bit=6, special_bit=True,
ovcfunc=_correct_overflow_nsync)
Expand Down
49 changes: 49 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# Build configuration and package metadata for phconvert.
[build-system]
requires = [
    "setuptools>=42",
    "versioneer"
]
build-backend = "setuptools.build_meta"

[project]
name = "phconvert"
# Version is derived at build time (versioneer), not declared statically.
dynamic = ["version"]
authors = [
    {name = "Antonino Ingargiola", email = "tritemio@gmail.com"},
    {name = "Paul David Harris", email = "harripd@gmail.com"}
]
description = "Convert Becker&Hickl, PicoQuant and other formats to Photon-HDF5."
readme = "README.md"
license = {file = "LICENSE.txt"}
keywords = [
    "single-molecule FRET",
    "smFRET",
    "biophysics",
    "file-format",
    "HDF5",
    "Photon-HDF5"
]
# Classifiers must match the official PyPI trove list exactly
# (including the " :: " separator) or the upload is rejected.
classifiers = [
    "Intended Audience :: Science/Research",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Scientific/Engineering",
]
requires-python = ">= 3.6"
dependencies = [
    "numpy>=1.19",
    "tables"
]

[project.urls]
Homepage = "http://photon-hdf5.github.io/phconvert/"
Documentation = "https://phconvert.readthedocs.io/en/latest/"
Repository = "https://github.com/Photon-HDF5/phconvert"
Issues = "https://github.com/Photon-HDF5/phconvert/issues"
Loading

0 comments on commit 7ed3981

Please sign in to comment.