Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
64 changes: 60 additions & 4 deletions xpsi/Data.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,14 @@ class and any derived classes must therefore store information required for
:param int last:
The index of the last row of the loaded response matrix containing
events (see note below).

:param bool grpdata:
    Whether the spectral counts have been grouped into larger channels.
    Defaults to False.

:param fits.reader or None origpha:
    If grpdata=True, this provides access to the original corrected spectrum
    before grouping.

.. note::

Expand Down Expand Up @@ -93,7 +101,7 @@ class and any derived classes must therefore store information required for
The exposure time, in seconds, to acquire this set of event data.

"""
def __init__(self, counts, channels, phases, first, last, exposure_time):
def __init__(self, counts, channels, phases, first, last, exposure_time, grpdata=False, origpha=None):

if not isinstance(counts, _np.ndarray):
try:
Expand Down Expand Up @@ -142,6 +150,13 @@ def __init__(self, counts, channels, phases, first, last, exposure_time):
self._exposure_time = float(exposure_time)
except TypeError:
raise TypeError('Exposure time must be a float.')

self._grpdata = grpdata

if self._grpdata==True:
self._origpha=origpha
else:
self._origpha=None

@property
def exposure_time(self):
Expand Down Expand Up @@ -473,11 +488,14 @@ def from_pha( cls, path,
with fits.open( path ) as hdul:
Header = hdul['SPECTRUM'].header
spectrum = hdul['SPECTRUM'].data

# Extract useful data
exposure = _np.double( Header['EXPOSURE'] )
channel_data = spectrum['CHANNEL']
counts_data = spectrum['COUNTS']
TLMIN= _np.int32(Header['TLMIN1'])
grpdata=False
origpha=None

# No channels specified, use everything
if channels is None:
Expand All @@ -488,6 +506,42 @@ def from_pha( cls, path,
min_channel = channels[0]
max_channel = channels[-1]

if 'QUALITY' in spectrum.columns.names :
print('Bad bins detected...')
quality=spectrum['QUALITY']
qualchannels = _np.where(quality==0)
spectrum = spectrum[qualchannels]
print(r'Spectrum reduced to {} channels'.format(_np.shape(qualchannels)[1]))
#channel_data = spectrum['CHANNEL']
channel_data = _np.arange(TLMIN, TLMIN+len(spectrum['CHANNEL']))
min_channel = _np.min( channel_data )
max_channel = _np.max( channel_data )
channels = _np.arange(min_channel,max_channel+1)
print(channel_data, channels)
counts_data = spectrum['COUNTS']

if 'GROUPING' in spectrum.columns.names :
grpdata = True
origpha=spectrum
print('Counts will be grouped into larger channels')
groupcha=spectrum['GROUPING']
min_binchannel = _np.where(groupcha == 1)[0]
max_binchannel = _np.hstack((min_binchannel[1:], len(channels)))
groupcounts=_np.zeros(len(min_binchannel))
jchan=0
print(channel_data, channels, min_binchannel, max_binchannel, len(origpha))
for i in range(len(min_binchannel)):
while jchan < len(channel_data) and channel_data[jchan]>=min_binchannel[i]+TLMIN and channel_data[jchan]<max_binchannel[i]+TLMIN :
groupcounts[i]=groupcounts[i]+counts_data[jchan]
jchan=jchan+1
grpchans=_np.arange(len(min_binchannel))
print(r'Counts grouped on {} new channels'.format(len(min_binchannel)))
channel_data = grpchans
min_channel = _np.min( channel_data )
max_channel = _np.max( channel_data )
channels = _np.arange(min_channel,max_channel+1)
counts_data = groupcounts

# Get intrinsic values
first = 0
last = max_channel - min_channel
Expand All @@ -504,7 +558,9 @@ def from_pha( cls, path,
phases=phases,
first=first,
last=last,
exposure_time=exposure )
exposure_time=exposure,
grpdata=grpdata,
origpha=origpha )

# Add useful paths
Data.backscal = Header['BACKSCAL']
Expand Down Expand Up @@ -609,4 +665,4 @@ def plot(self, num_rot = 2, dpi=200, colormap='inferno'):
fig.colorbar( im , ax=ax2 , label='Counts')
fig.set_dpi(dpi)

return fig, axs
return fig, axs
32 changes: 24 additions & 8 deletions xpsi/Instrument.py
Original file line number Diff line number Diff line change
Expand Up @@ -409,12 +409,15 @@ def from_ogip_fits(cls,

# Open useful values in ARF/RMF/RSP
with fits.open( RMF_path ) as RMF_hdul:
RMF_header = RMF_hdul['MATRIX'].header
if 'SPECRESP MATRIX' in RMF_hdul :
RMF_header = RMF_hdul['SPECRESP MATRIX'].header
else :
RMF_header = RMF_hdul['MATRIX'].header
RMF_instr = RMF_header['INSTRUME']
DETCHANS = RMF_header['DETCHANS']
NUMGRP = RMF_header['NAXIS2']
TLMIN = RMF_header['TLMIN4']
TLMAX = RMF_header['TLMAX4']
#TLMAX = RMF_header['TLMAX4']

# Handle the RSP case
if ARF_path is not None:
Expand All @@ -426,16 +429,20 @@ def from_ogip_fits(cls,
max_channel = DETCHANS -1
if max_input == -1:
max_input = NUMGRP

channels = _np.arange( min_channel, max_channel+1 )
inputs = _np.arange( min_input, max_input+1 )

# Perform routine checks
assert min_channel >= TLMIN and max_channel <= TLMAX
assert min_input >= 0 and max_input <= NUMGRP
#assert min_channel >= TLMIN and max_channel <= TLMAX
#assert min_input >= 0 and max_input <= NUMGRP

# If everything in order, get the data
with fits.open( RMF_path ) as RMF_hdul:
RMF_MATRIX = RMF_hdul['MATRIX'].data
if 'SPECRESP MATRIX' in RMF_hdul :
RMF_MATRIX = RMF_hdul['SPECRESP MATRIX'].data
else :
RMF_MATRIX = RMF_hdul['MATRIX'].data
RMF_EBOUNDS = RMF_hdul['EBOUNDS'].data

# Get the channels from the data
Expand All @@ -457,8 +464,17 @@ def from_ogip_fits(cls,

if n_chan == 0:
continue

RMF[f_chan:f_chan+n_chan,i] += RMF_line[n_skip:n_skip+n_chan]

if TLMIN == 1 :
if RMF_line.shape==(): # if RMF_line holds a single value, as for a diagonal matrix.
RMF[f_chan-1:f_chan-1+n_chan,i] += RMF_line
else:
RMF[f_chan-1:f_chan-1+n_chan,i] += RMF_line[n_skip:n_skip+n_chan]
else :
if RMF_line.shape==():
RMF[f_chan:f_chan+n_chan,i] += RMF_line
else:
RMF[f_chan:f_chan+n_chan,i] += RMF_line[n_skip:n_skip+n_chan]
n_skip += n_chan

# Make the RSP, depending on the input files
Expand Down Expand Up @@ -498,4 +514,4 @@ def from_ogip_fits(cls,
Instrument.ARF = ARF_area[min_input-1:max_input][~empty_inputs]
Instrument.name = RMF_instr

return Instrument
return Instrument
29 changes: 29 additions & 0 deletions xpsi/Signal.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
__all__ = ["Signal", "LikelihoodError"]

from xpsi.global_imports import *
from scipy import sparse

from xpsi.Data import Data
from xpsi.Instrument import Instrument, ChannelError
Expand All @@ -10,6 +11,7 @@
from xpsi.tools.energy_integrator import energy_integrator
#from xpsi.tools.energy_interpolator import energy_interpolator
#from xpsi.tools.phase_integrator import phase_integrator
#import sparse

from abc import abstractmethod
from xpsi.Parameter import Parameter
Expand Down Expand Up @@ -118,6 +120,33 @@ def __init__(self,
raise TypeError('Invalid type for an instrument object.')
else:
self._instrument = deepcopy( instrument )


# Check whether the counts data have been grouped into larger channels

if self._data._grpdata == True:
numchangrp=_np.where(self._data._origpha['GROUPING']==1)
#print(numchangrp)
numchangrp=_np.hstack((numchangrp[0], len(self._data._origpha)))
rows = []
cols = []
dtint = []
for i in range(len(numchangrp[1:])):
for j in range(numchangrp[:-1][i], numchangrp[1:][i]):
rows.append(i)
cols.append(j)
dtint.append(True)
print(numchangrp)
binchannels=numchangrp[1:]-numchangrp[:-1]
grpRSP=sparse.coo_matrix((dtint, (rows, cols)), shape=(len(numchangrp[1:]), len(self._instrument.matrix)))
newRSP=grpRSP @ self._instrument.matrix
self._instrument.matrix=newRSP
self._instrument.channels=self._data.channels
neweminchan=self._instrument.channel_edges[numchangrp[:-1]]
newemaxchan=self._instrument.channel_edges[numchangrp[1:]]
self._instrument.channel_edges=_np.hstack((neweminchan[0], newemaxchan))



# Trimming the data and response so they fit together
if min_channel != 0 or max_channel != -1:
Expand Down