diff --git a/examples/generate_fake.py b/examples/generate_fake.py index 36bb5b8..b395d2f 100644 --- a/examples/generate_fake.py +++ b/examples/generate_fake.py @@ -26,7 +26,7 @@ b'nbits': 8 } -filename = './examples/pspm.fil' +filename = './examples/pspm32.fil' # generate a fake filterbank file generate_file(filename, header) diff --git a/examples/run_pipeline.py b/examples/run_pipeline.py index c9e4e7e..a471122 100644 --- a/examples/run_pipeline.py +++ b/examples/run_pipeline.py @@ -11,7 +11,7 @@ # init filterbank filename fil_name = os.path.abspath("./pspm32.fil") # init filterbank sample size -sample_size = 192 +sample_size = 512 # init times the pipeline should run n_times = 10 diff --git a/examples/sum_harmonics.py b/examples/sum_harmonics.py new file mode 100644 index 0000000..1f05961 --- /dev/null +++ b/examples/sum_harmonics.py @@ -0,0 +1,60 @@ +import filterbank.filterbank as filterbank +import matplotlib.pyplot as plt +import pandas as pd +from pandas import DataFrame +from harmonic_summing import harmonic_summing as harmsum +from plot import waterfall, waterfall_plot +import pylab as pyl + +fb = filterbank.Filterbank(filename='./pspm32.fil', read_all=True) +# get filertbank data + frequency labels + +freqs, fil_data = fb.select_data() +harmsum = harmsum.apply_harmonic_summing(frequencies=freqs, fil_data=fil_data, precision=0.001, num_lo_up_harmonics=(5, 5)) +print(harmsum) + +freqs, fil_data = harmsum + + +# Create DataFrame with the fil_data changed and the freqs +fil_dataframe = pd.DataFrame(fil_data, columns=freqs) + +list_num = [x for x in range(len(fil_data))] +print(list_num) +fil_dataframe['Sample'] = list_num + +# Get amplitudes of freq +ampl = fil_dataframe.sum(axis=0, skipna=True) +print(ampl.size) +inten = [] + +# Store in inten the amplitudes for each freq +# -1 since dataframe has 'Sample' column wich we don't need here +for i in range(ampl.size - 1): + inten.append(ampl[i]) +print(inten) + +# Create datatFrame with freq and their amplitudes +ampl_freq = {'frequencies': freqs, 'Intensity': inten} +a = DataFrame(ampl_freq, columns=['frequencies', 'Intensity']) +print(a) + +# Plot the dataFrame +a.plot(x='frequencies', y='Intensity', kind='line') +plt.show() + + + + +# Waterfall +data, extent = waterfall_plot(fil_data, freqs) +img = plt.imshow(data.T, + aspect='auto', + origin='lower', + rasterized=True, + interpolation='nearest', + extent=extent, + cmap='cubehelix') +plt.show() + + diff --git a/examples/visualize_dispersed.py b/examples/visualize_dispersed.py index f95ecb3..1d8007a 100644 --- a/examples/visualize_dispersed.py +++ b/examples/visualize_dispersed.py @@ -10,7 +10,7 @@ sys.path.insert(0,PARENT_DIR) -filter_bank = Filterbank(filename='./pspm32.fil') +filter_bank = Filterbank(filename='../data/pspm8.fil') # Calculate the center frequency with the data in the header center_freq = filter_bank.header[b'center_freq'] diff --git a/examples/visualize_filterbank_psd.py b/examples/visualize_filterbank_psd.py index fec86ce..9273888 100644 --- a/examples/visualize_filterbank_psd.py +++ b/examples/visualize_filterbank_psd.py @@ -17,7 +17,7 @@ from filterbank.filterbank import Filterbank # Instatiate the filterbank reader and point to the filterbank file -fb = Filterbank(filename='./pspm32.fil', read_all=True) +fb = Filterbank(filename='../data/pspm32.fil', read_all=True) # read the data in the filterbank file f, samples = fb.select_data() @@ -29,7 +29,8 @@ # Get the powerlevels and the frequencies print(samples[0]) power_levels, freqs, _ = opsd(samples[0], nfft=128, 
sample_rate=80, sides='twosided') - +print(power_levels) +print(len(power_levels)) # Plot the PSD plt.grid(True) plt.xlabel('Frequency (MHz)') diff --git a/examples/visualize_psd.py b/examples/visualize_psd.py index bc2a744..edfaa66 100644 --- a/examples/visualize_psd.py +++ b/examples/visualize_psd.py @@ -41,7 +41,7 @@ power_levels = 10 * np.log10(PXX/(sdr.sample_rate/1e6)) # Add the center frequency to the frequencies so it matches the actual frequencies -freqs = freqs + sdr.center_freq/1e6 +freqs = freqs + sdr.center_freq/1e62 # Plot the PSD plt.plot(freqs, power_levels) diff --git a/examples/visualize_waterfall_stream.py b/examples/visualize_waterfall_stream.py index 7d93953..a71b792 100644 --- a/examples/visualize_waterfall_stream.py +++ b/examples/visualize_waterfall_stream.py @@ -13,9 +13,9 @@ fb = Filterbank(filename='./pspm32.fil') -wf = waterfall.Waterfall(fb=fb, fig=pyl.figure(), mode="stream") +wf = waterfall.Waterfall(filter_bank=fb, fig=pyl.figure(), mode="stream") fig, update, frames, repeat = wf.animated_plotter() ani = animation.FuncAnimation(fig, update, frames=frames,repeat=repeat) -pyl.show() +pyl.show(block=True) diff --git a/filterbank/filterbank.py b/filterbank/filterbank.py index ccae63b..75a16ed 100644 --- a/filterbank/filterbank.py +++ b/filterbank/filterbank.py @@ -56,7 +56,6 @@ def __init__(self, filename, freq_range=None, time_range=None, read_all=False): if read_all: self.read_filterbank() - def read_filterbank(self): """ Read filterbank file and transform to tuple of 3 matrices: @@ -70,7 +69,7 @@ def read_filterbank(self): for i_i in range(self.n_samples): for j_j in range(self.n_ifs): self.fil.seek(self.n_bytes * self.i_0, 1) - # add to matrix + # add to matrixº self.data[i_i, j_j] = np.fromfile(self.fil, count=self.n_chans_selected, dtype=self.dd_type) # skip bytes till start of next chunk @@ -78,7 +77,6 @@ def read_filterbank(self): # release file resources self.fil.close() - def next_row(self): """ Read filterbank file per row diff --git a/harmonic_summing/__init__.py b/harmonic_summing/__init__.py new file mode 100644 index 0000000..4ae9e0a --- /dev/null +++ b/harmonic_summing/__init__.py @@ -0,0 +1,4 @@ +""" + Export harmonic_summing methods +""" +from .harmonic_summing import apply_harmonic_summing diff --git a/harmonic_summing/harmonic_summing.py b/harmonic_summing/harmonic_summing.py new file mode 100644 index 0000000..e2a6158 --- /dev/null +++ b/harmonic_summing/harmonic_summing.py @@ -0,0 +1,100 @@ + +import pandas as pd +import numpy as np + +# uncomment following options to print dataframes in their entirety +# pd. set_option('display.max_rows', 10000) +# pd.set_option('display.max_columns', 10000) + +# set number of harmonics to look for + + +def find_nearest(array, value): + idx = (np.abs(array - value)).argmin() + return array[idx] + +def match_and_add_harmonics(fil_dataframe, frequencies, most_pwrful_freq, perfect_harms, precision): + """ + this function is used by apply_harmonic_summing(). + It finds the nearest recorded harmonics in the data frame to the to + the calculated 'perfect' harmonics (calculated in apply_harmonic_summing()) + and adds the intensity to the supposed funamental component. + :param fil_dataframe: data frame of the filterbank data. + :param frequencies: frequency labels of the filterbank data. + :param most_pwrful_freq: overall most powerful frequency component. + :param perfect_harms: calculated perfect harmonics. + :param precision: a decimal number e.g. 
0.001 which determines the threshold for 'finding' + the nearest recorded frequency or concluding that the frequency is not present in the + filterbank data. + """ + for i in range(len(perfect_harms)): + harms = np.empty([len(perfect_harms)]) + harms[i] = find_nearest(frequencies, perfect_harms[i]) + if abs(harms[i] - perfect_harms[i]) < (perfect_harms[i] * precision): + fil_dataframe[most_pwrful_freq] = fil_dataframe[most_pwrful_freq] + fil_dataframe[harms[i]] + fil_dataframe[harms[i]] = 0 + else: + print('no harmonic close to ' + str(perfect_harms[i])) + + +def apply_harmonic_summing(frequencies, fil_data, precision, num_lo_up_harmonics): + """ + Top level function of this file. + Converts imported filterbank (fourier transformed) data to pandas DataFrame, + detects the most intense frequency component, calculates upper and lower harmonic frequencies, + finds recorded frequencies which are nearest to the calculated harmonic frequencies (if within + range specified by 'precision'), + adds the sample values of the matching frequency components to the fundamental and zeros matching + components' values out. + :param frequencies: frequency labels from filterbank data. + :param fil_data: data frame of the filterbank data. + :param precision: a decimal number e.g. 0.001 which determines the threshold for 'finding' + the nearest recorded frequency or concluding that the frequency is not present in the + filterbank data. + :param num_lo_up_harmonics: tuple of 2 integers specifying the number of lower and upper + harmonics to find and add to the fundamental. + :return: a tuple of processed filterbank data, same format as output of filterbank.read_all() + """ + + num_lower_harmonics, num_upper_harmonics = num_lo_up_harmonics + + # subtract 226 FOR TESTING ONLY + frequencies -= 426 + frequencies = frequencies.round(3) + + # create dataframe with each channel as column (frequencies as column labels) + fil_dataframe = pd.DataFrame(fil_data, columns=frequencies).abs() + + # print(fil_dataframe) + + # find the overall most powerful frequency + most_pwrful_freq = fil_dataframe.sum().idxmax(axis=0, skipna=True) + print('Most powerful frequency = ', str(most_pwrful_freq)) + print('Sum amplitude of mpf = ', fil_dataframe.sum().max()) + + # create empty array which we'll soon use for the lower harmonics + low_perfect_harms = np.empty([num_lower_harmonics]) + high_perfect_harms = np.empty([num_upper_harmonics]) + + # calculate the perfect lower harmonic frequencies + multiplier = 1 + for i in range(len(low_perfect_harms)): + multiplier = multiplier / 2 + low_perfect_harms[i] = most_pwrful_freq * multiplier + + # print(low_perfect_harms) + + # calculate the perfect upper harmonic frequencies + for i in range(len(high_perfect_harms)): + high_perfect_harms[i] = most_pwrful_freq + (most_pwrful_freq * (i + 1)) + + # print(high_perfect_harms) + + # find the closest recorded frequencies to the calculated harmonics + # add each sample of the recorded harmonics to the fundamental frequency's samples. set harmonics' samples to zero. 
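+    # Worked example of the tolerance check (illustrative numbers, not from real data): with
+    # precision=0.001 and a perfect harmonic of 400.0, the threshold is 400.0 * 0.001 = 0.4,
+    # so a recorded channel at 400.3 is accepted (its samples are added to the fundamental's
+    # column and the 400.3 column is zeroed), while a channel at 400.5 is skipped with a
+    # "no harmonic close to" message.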
+ # for high harmonics + match_and_add_harmonics(fil_dataframe, frequencies, most_pwrful_freq, high_perfect_harms, precision) + # for low harmonics + match_and_add_harmonics(fil_dataframe, frequencies, most_pwrful_freq, low_perfect_harms, precision) + + return frequencies, fil_dataframe.to_numpy() diff --git a/newnewsumhrm.f b/newnewsumhrm.f new file mode 100644 index 0000000..a214b8a --- /dev/null +++ b/newnewsumhrm.f @@ -0,0 +1,102 @@ +C ************************************************************** + SUBROUTINE NEWNEWSUMHRM (SP,NF,NF1,P1,P2,P4,P8,P16) +C ************************************************************** + IMPLICIT NONE + INTEGER NF, NF1 + REAL SP(NF) + INTEGER ifold + REAL fval + INTEGER NFS + REAL X, XDIV + INTEGER IDX, LSTIDX + INTEGER I, N, NFOLDS + INTEGER FOLDVALS(5) + REAL P1(*),P2(*),P4(*),P8(*),P16(*) + + NFOLDS=5 + FOLDVALS(1)=1 + FOLDVALS(2)=2 + FOLDVALS(3)=4 + FOLDVALS(4)=8 + FOLDVALS(5)=16 + +c SPH(npts) + + do ifold=1,nfolds + fval = foldvals(ifold) + if (fval.eq.1) then + do n=1,nf + p1(n) = sp(n) + enddo + endif + if (fval.eq.2) then + NFS = MAX(1,MIN(foldvals(ifold)*NF1-foldvals(ifold)/2,NF)) + XDIV = 1./fval + do N=NFS,NF + p2(N)=0. + LSTIDX = -1 + do I=1,foldvals(ifold) + X = N * I * XDIV + 0.5 + IDX = X + IF (IDX.GT.1.AND.IDX.NE.LSTIDX) THEN + p2(N) = p2(N) + SP(IDX) + ENDIF + LSTIDX = IDX + enddo + enddo + endif + if (fval.eq.4) then + NFS = MAX(1,MIN(foldvals(ifold)*NF1-foldvals(ifold)/2,NF)) + XDIV = 1./fval + do N=NFS,NF + p4(N)=0. + LSTIDX = -1 + do I=1,foldvals(ifold) + X = N * I * XDIV + 0.5 + IDX = X + IF (IDX.GT.1.AND.IDX.NE.LSTIDX) THEN + p4(N) = p4(N) + SP(IDX) + ENDIF + LSTIDX = IDX + enddo + enddo + endif + if (fval.eq.8) then + NFS = MAX(1,MIN(foldvals(ifold)*NF1-foldvals(ifold)/2,NF)) + XDIV = 1./fval + do N=NFS,NF + p8(N)=0. + LSTIDX = -1 + do I=1,foldvals(ifold) + X = N * I * XDIV + 0.5 + IDX = X + IF (IDX.GT.1.AND.IDX.NE.LSTIDX) THEN + p8(N) = p8(N) + SP(IDX) + ENDIF + LSTIDX = IDX + enddo + enddo + endif + if (fval.eq.16) then + NFS = MAX(1,MIN(foldvals(ifold)*NF1-foldvals(ifold)/2,NF)) + XDIV = 1./fval + do N=NFS,NF + p16(N)=0. + LSTIDX = -1 + do I=1,foldvals(ifold) + X = N * I * XDIV + 0.5 + IDX = X + IF (IDX.GT.1.AND.IDX.NE.LSTIDX) THEN + p16(N) = p16(N) + SP(IDX) + ENDIF + LSTIDX = IDX + enddo + enddo + endif + enddo +c + RETURN +C +C END OF SUBROUTINE NEWNEWSUMHRM +C + END diff --git a/newnewsumhrm.py b/newnewsumhrm.py new file mode 100644 index 0000000..5f0689c --- /dev/null +++ b/newnewsumhrm.py @@ -0,0 +1,70 @@ + + +def newnewsumhrm(SP,NF,NF1,P1,P2,P4,P8,P16): + # INTEGER NF, NF1 + # REAL SP(NF) list sp with size nf + # INTEGER ifold + # REAL fval + # INTEGER NFS + # REAL X, XDIV + # INTEGER IDX, LSTIDX + # INTEGER I, N, NFOLDS + # INTEGER FOLDVALS(5) list FOLDVALS with size 5 + # REAL P1(*), P2(*), P4(*), P8(*), P16(*) lists with unknown size + + NFOLDS = 5 + FOLDVALS = [1,2,4,8,16] + for ifold in range(NFOLDS): + fval = FOLDVALS[ifold] + if fval == 1: + for n in range(NF): + P1[n] = SP[n] + if fval == 2: + NFS = min(NF) + XDIV = 1./fval + for n in range(NFS, NF): + P2[n] = 0 + LSTIDX = -1 + for i in range(1, FOLDVALS[ifold]): + X = n * i * XDIV + 0.5 + IDX = X + if (IDX > 1) and (IDX != LSTIDX): + P2[n] = P2[n] + SP[IDX] + LSTIDX = IDX + if fval == 4: + NFS = min(NF) + XDIV = 1./fval + for n in range(NFS, NF): + P4[n] = 0. + LSTIDX = -1 + for i in range(1, FOLDVALS[ifold]): + X = n * i * XDIV + 0.5 + IDX = X + if (IDX > 1) and (IDX != LSTIDX): + P4[n] = P4[n] + SP[IDX] + LSTIDX = IDX + if fval == 8: + NFS = min(NF) + XDIV = 1. 
/ fval + for n in range(NFS, NF): + P8[n] = 0. + LSTIDX = -1 + for i in range(1, FOLDVALS[ifold]): + X = n * i * XDIV + 0.5 + IDX = X + if (IDX > 1) and (IDX != LSTIDX): + P8[n] = P8[n] + SP[IDX] + LSTIDX = IDX + if fval == 16: + NFS = min(NF) + XDIV = 1. / fval + for n in range(NFS, NF): + P16[n] = 0. + LSTIDX = -1 + for i in range(1, FOLDVALS[ifold]): + X = n * i * XDIV + 0.5 + IDX = X + if (IDX > 1) and (IDX != LSTIDX): + P16[n] = P16[n] + SP[IDX] + LSTIDX = IDX + return diff --git a/pipeline/pipeline.py b/pipeline/pipeline.py index f77b22e..f533826 100644 --- a/pipeline/pipeline.py +++ b/pipeline/pipeline.py @@ -5,6 +5,7 @@ import os import sys import inspect + CURRENT_DIR = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) PARENT_DIR = os.path.dirname(CURRENT_DIR) sys.path.insert(0, PARENT_DIR) @@ -15,6 +16,7 @@ import dedisperse import fourier + # pylint: disable=too-many-locals # pylint: disable=too-many-arguments # pylint: disable=invalid-name @@ -46,7 +48,6 @@ def __init__(self, filename=None, as_stream=False, DM=230, scale=3, n=None, size file.write(str(result) + ",") file.close() - def read_rows(self, filename): """ Read the filterbank data as stream @@ -62,7 +63,6 @@ def read_rows(self, filename): time_stop = timer() - time_start return time_stop - def read_n_rows(self, n, filename, DM, scale): """ Read the filterbank data as stream @@ -85,7 +85,6 @@ def read_n_rows(self, n, filename, DM, scale): stopwatch_list.append(stopwatch) return stopwatch_list - def read_static(self, filename, DM, scale, size): """ Read the filterbank data at once @@ -106,13 +105,11 @@ def read_static(self, filename, DM, scale, size): stopwatch = self.measure_methods(stopwatch, fil_data, freqs, DM, scale) return stopwatch - def measure_methods(self, stopwatch, fil_data, freqs, DM, scale): """ Run and time all methods/modules """ # clipping - time_clipping = timer() _, _ = clipping.clipping(freqs, fil_data) stopwatch['time_clipping'] = timer() - time_clipping @@ -122,7 +119,8 @@ def measure_methods(self, stopwatch, fil_data, freqs, DM, scale): stopwatch['time_dedisp'] = timer() - time_dedisp # timeseries time_t_series = timer() - time_series = timeseries.Timeseries(fil_data) + time_series = timeseries.Timeseries() + time_series = time_series.from_filterbank_data(fil_data) stopwatch['time_t_series'] = timer() - time_t_series # downsample time_downsamp = timer() diff --git a/plot/waterfall.py b/plot/waterfall.py index b2788d5..4d89a85 100644 --- a/plot/waterfall.py +++ b/plot/waterfall.py @@ -14,7 +14,7 @@ class Waterfall(): # All these attributes are needed. 
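    # Note: max_n_rows sets how many filterbank rows next_n_rows() reads for each plot update.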
def __init__(self, filter_bank=None, center_freq=None, sample_freq=None, fig=None, scans_per_sweep=None, - max_n_rows=1024, mode='stream', t_obs=None): + max_n_rows=10, mode='stream', t_obs=None): """ Setup waterfall object """ @@ -47,7 +47,7 @@ def __init__(self, filter_bank=None, center_freq=None, sample_freq=None, else: freqs = filter_bank.get_freqs() self.samples = self.filter_bank.next_n_rows(self.max_n_rows) - + print(self.samples) self.freqs = np.asarray(freqs) diff --git a/requirements.txt b/requirements.txt index e9a91b7..93b1a80 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,3 @@ +pandas pytest numpy diff --git a/seek/search.py b/seek/search.py new file mode 100644 index 0000000..8abc112 --- /dev/null +++ b/seek/search.py @@ -0,0 +1,101 @@ +import filterbank.filterbank as filterbank +import matplotlib.pyplot as plt +from harmonic_summing import harmonic_summing as harmsum +from timeseries.timeseries import Timeseries +import pandas as pd +import numpy as np +from plot import waterfall_plot +from clipping import clipping + +fb = filterbank.Filterbank(filename='../pspm32.fil', read_all=True) +print(fb.get_header()) +# Get time between values of sample +header = fb.get_header() +tsamp = header[b'tsamp'] +fch1 = header[b'fch1'] +nifs = header[b'nifs'] +foff = header[b'foff'] +nchans = header[b'nchans'] +# observation time (default) +tobs = 10 + +nsamples = tobs / tsamp +freqs, fil_data = fb.select_data() +print("----------something-------------") +N = nifs * nchans * nsamples +print('N = ' + str(N)) +print(freqs) +print(fil_data[1]) +skyfreq = fch1 + fil_data[1] * foff +# print('skyfreq:') +# print(skyfreq) +# Set absolute values of Fil_data, since we don't know what negative values mean +fil_dataframe = pd.DataFrame(fil_data, columns=freqs).abs() +most_pwrful_freq2 = fil_dataframe.sum().idxmax(axis=0, skipna=True) +columnsData2 = fil_dataframe.loc[:, most_pwrful_freq2] +fundamental2 = columnsData2.to_numpy() +print("--------------fundamental FROM FILTERBANK-----------------") +print(fundamental2) +fil_data2 = fil_dataframe.to_numpy() +harmsum1 = harmsum.apply_harmonic_summing(frequencies=freqs, fil_data=fil_data2, precision=0.001, + num_lo_up_harmonics=(5, 5)) +freqs1, samples1 = harmsum1 +fil_dataframe2 = pd.DataFrame(samples1, columns=freqs1) + +most_pwrful_freq = fil_dataframe2.sum().idxmax(axis=0, skipna=True) +print(most_pwrful_freq2) + +# Get Sample of the fundamental +columnsData = fil_dataframe2.loc[:, most_pwrful_freq] +fundamental = columnsData.to_numpy() +print("--------------fundamental FROM HARMONIC SUMMING-----------------") +print(fundamental) +ax = plt.gca() +columnsData.plot(kind='line', x='Sample', y=most_pwrful_freq, ax=ax) +plt.xlabel('time (us)', fontsize=12) +plt.ylabel('Intensity', fontsize=12) +plt.show() + + +# Plot fundamental2 +# time_series = Timeseries(fundamental2) +# plt.subplot(2,1,2) +# plt.plot(time_series.timeseries) +# plt.xlabel('time (us)', fontsize=12) +# plt.ylabel('Intensity', fontsize=12) +# plt.show() + +# Plot fundamental +# time_series1 = Timeseries(fundamental) +# plt.subplot(2,1,2) +# plt.plot(time_series1.timeseries) +# plt.xlabel('time', fontsize=12) +# plt.ylabel('Intensity', fontsize=12) +# plt.show() +def get_fundamental(frequencies, filterbank_data): + fil_dataframe = pd.DataFrame(filterbank_data, columns=frequencies) + most_pwrful_freq = fil_dataframe.sum().idxmax(axis=0, skipna=True) + columnsData = fil_dataframe2.loc[:, most_pwrful_freq] + fundamental = columnsData.to_numpy() + ax = plt.gca() + 
columnsData.plot(kind='line', x='Sample', y=most_pwrful_freq, ax=ax) + plt.xlabel('time (us)', fontsize=12) + plt.ylabel('Intensity', fontsize=12) + plt.show() + return fundamental + + +def search_periodicity(fundamental): + fundamental2 = np.copy(fundamental) + fundamental2[::-1].sort() + max_values = [] + for i in range(2): + max_values.append(fundamental2[i]) + + x = np.where(fundamental == max_values[0]) + y = np.where(fundamental == max_values[1]) + time = tsamp * x - tsamp * y + return time + + +search_periodicity(fundamental) diff --git a/seek/seek.py b/seek/seek.py new file mode 100644 index 0000000..4c3ea30 --- /dev/null +++ b/seek/seek.py @@ -0,0 +1,167 @@ +from clipping import clipping +from dedisperse import dedisperse +from dedisperse.dedisperse import find_estimation_intensity +from plot import waterfall_plot + +from timeseries.timeseries import Timeseries +import filterbank.filterbank as filterbank +import matplotlib.pyplot as plt +from harmonic_summing import harmonic_summing as harmsum +from timeseries.timeseries import Timeseries +from pandas import DataFrame +import pandas as pd +size = 512 +fb = filterbank.Filterbank(filename='./fake_pulsar_C32_snr1_4.fil', read_all=True) +# get filertbank data + frequency labels +print(fb.get_header()) +freqs, fil_data = fb.select_data() + +# Transform filterbank data into dataframe +fil_dataframe = pd.DataFrame(fil_data, columns=freqs).abs() +fd = fil_dataframe.max(axis=0).max() +print(fd) +print("<---------------------------------------------------->") +fil_data2 = fil_dataframe.to_numpy() +print(fil_data2) +print("<---------------------------------------------------->") + +# Apply Clipping +new_freqs, new_samples = clipping(freqs, fil_data2) +print("<----------------------Samples from Clipping------------------------------>") +print(new_samples) +# print(new_freqs) +print("<---------------------------------------------------->") + +# Apply Dedisperse TODO doesn't Work dedisperse method +# deds = dedisperse(new_samples, 235) +# print(deds) +# average = find_estimation_intensity(new_samples,5) +# print("Average" + str(average)) +# print("<---------------------------------------------------->") + +# Apply harmonic summing on fb data +harmsum1 = harmsum.apply_harmonic_summing(frequencies=freqs, fil_data=fil_data2, precision=0.001, num_lo_up_harmonics=(5, 5)) +# print(harmsum) +freqs2, samples2 = harmsum1 + +# Apply Harmonic summing after clipping TODO obtaining same result doing harmonic summing and from clipping +harmsum2 = harmsum.apply_harmonic_summing(frequencies=new_freqs, fil_data=new_samples, precision=0.001, + num_lo_up_harmonics=(5, 5)) +freqs3, samples3 = harmsum2 + +print("<---------------------------------------------------->") +print(freqs3) +print("<---------------------------------------------------->") +print(freqs2) +print("<--------------------Original Samples-------------------------------->") +print(fil_data2) +print("<-------------------Samples after Clipping--------------------------------->") +print(new_samples) +print("<--------------------------Samples after Clipping and harmonic-------------------------->") +print(samples3) +print("<-------------------Samples after harmonic--------------------------------->") +print(samples2) +print("<---------------------------------------------------->") + + +time_series2 = Timeseries(samples2) + +print(time_series2.get()) +print("<---------------------------------------------------->") +time_series3 = Timeseries(fil_data2) +print(time_series3.get()) 
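+# time_series2 above is built from the harmonically-summed samples and time_series3 from the
+# raw (absolute-valued) filterbank data, so the two printouts can be compared directly.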
+print("<---------------------------------------------------->") + +fil_dataframe2 = pd.DataFrame(fil_data2, columns=freqs2) +most_pwrful_freq = fil_dataframe2.sum().idxmax(axis=0, skipna=True) +list_num = [x for x in range(len(fil_data2))] +print(list_num) +fil_dataframe2['Sample'] = list_num + +print("most pwrful freq:" + str(most_pwrful_freq)) +print("<---------------------------------------------------->") +columnsData = fil_dataframe2.loc[ : , most_pwrful_freq ] +print(columnsData) +print("<---------------------------------------------------->") +print(fil_dataframe2) +print("<---------------------------------------------------->") + + +# ax = plt.gca() +# fil_dataframe2.plot(kind='line',x='Sample',y=most_pwrful_freq,ax=ax) +# plt.show() + +# Same thing but with the samples of the harmonic summing + +fil_dataframe3 = pd.DataFrame(samples2, columns=freqs2) +most_pwrful_freq2 = fil_dataframe3.sum().idxmax(axis=0, skipna=True) +list_num = [x for x in range(len(fil_dataframe3))] +print(list_num) +fil_dataframe3['Sample'] = list_num + +print("most pwrful freq:" + str(most_pwrful_freq2)) +print("<---------------------------------------------------->") +columnsData = fil_dataframe3.loc[ : , most_pwrful_freq ] +print(columnsData) +print("<---------------------------------------------------->") +print(fil_dataframe3) +print("<---------------------------------------------------->") + +signal = columnsData.to_numpy() +time_series4 = Timeseries(signal) + +# plot filterbank data without changes +plt.subplot(2,1,2) +plt.plot(time_series3.timeseries) +plt.xlabel('time', fontsize=12) +plt.ylabel('Intensity', fontsize=12) +plt.show() +# waterfall filterbank data after harmonic summing +data, extent = waterfall_plot(fil_data2, freqs2) +img = plt.imshow(data.T, + aspect='auto', + origin='lower', + rasterized=True, + interpolation='nearest', + extent=extent, + cmap='cubehelix') +plt.show() + +# plot filterbank data after harmonic summing +plt.subplot(2,1,2) +plt.plot(time_series2.timeseries) +plt.xlabel('time', fontsize=12) +plt.ylabel('Intensity', fontsize=12) +plt.show() + +# waterfall filterbank data after harmonic summing +data, extent = waterfall_plot(samples2, freqs2) +img = plt.imshow(data.T, + aspect='auto', + origin='lower', + rasterized=True, + interpolation='nearest', + extent=extent, + cmap='cubehelix') +plt.show() + +# plot signal with most intensity isolated from filterbank data after +# harmonic summing +ax = plt.gca() +fil_dataframe3.plot(kind='line',x='Sample',y=most_pwrful_freq,ax=ax) +plt.xlabel('time', fontsize=12) +plt.ylabel('Intensity', fontsize=12) +plt.show() + +# plot timeseries of signal with most intensity isolated from filterbank data after +# harmonic summing +plt.subplot(2,1,2) +plt.plot(time_series4.timeseries) +plt.xlabel('time (s)', fontsize=12) +plt.ylabel('Intensity', fontsize=12) +plt.show() +# plt.subplot(2,1,2) +# plt.plot(time_series.timeseries) +# plt.show() + + diff --git a/sgg+95_4750.epn b/sgg+95_4750.epn new file mode 100644 index 0000000..374fdff --- /dev/null +++ b/sgg+95_4750.epn @@ -0,0 +1 @@ +EPN 6.00 60 Profile from Seiradakis et al. 
(1995) J0826+2637 0823+26 .530660797580 19.475 5.900tml93 sgg+95 082651.310+263726.000effberg .000 .000 U .00000 .00000 .00000 0505199700000000 1 01024 510.000000 510.000000 0 0 0 --------------------------------------------------------------------------------I 0 0 4.75000000 GHz 500.000000 MHz .00000 .602012E+03-.710000E+01 .274070E+01 .530660797580 10B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B110B112BD0A4A1275083104E9126810EC106F1419106F16390EB20CB905520F5C163218AE179A0A230D4913681660088609031E6303F104E90CF40EED14FE1A7816A117F01C9F10900FB80F4F0CED101407240C1510C4075E126E02E40BF412E40BE10E9112B71069134D0CCD0D3517A809920F9017E9199A12EB0BE70B8511CB1189114E100012E411DE130516AE12B70E840EA4160B15C3109D0B920ED9169B13DE1120114E175211D8096B08C1167A12C410761D7118C80ECC0F0712200D2F164618BB09B4149C135405040F8311620D7008241213141F16B50778090318C816F00D560CCD0865065E162B1B640B501DB90E1B0C7112C414E416320A020FE60D22114E1D3C15C9186C0DA514950DD908791106136112CA0F1A11960B78151211BE0F001ACE08CE19790EB210D80F420DF42227193E17731639137511D1175912751DB910060BCD0CC605D611B7130C10EC10AA126E0D43149C135A167A082A084A0FEC14F10E350F900D9E19100D700D6309FB1DF410F9100609BA1560112D0D7E08E8110617F00E77102E117C16C908D50A37103412FF0D3508A60ED90BB3106F169B13DE083D0EC50F7015881A3E0F8A11DE12C40AD30D2F10210A9912BD12750F2816C912CA19E81375103413821089098517DC106F12130D2212C40A850B57154D06650E210F8303BB0DBF1F560453082A1062150B08580BDA08BA15810C980E01105C0B3C16B509D40F42193E051009FB128F0CA5112D1BBA156D15810FAB015318730A2309A0130C151F0E9E13A9144011F20F900DF415180A5D0D6313C30C01172B102715C90B8511F20F14093E05790DD908D5135A090312B70EB2075E173F03730E49101A0AA60F2813C31069114E0C9F156017BB16C90C7E13400879159B014015C311960C5D087905380DC60DB212C411DE142C06FD11B003DC13A9183E1EE60CFA0E35089F16111C7E104E1F2801B707A718EF07BB0ED2058013880D5612B010F9166614EA0DAB14260F4217C8087F0C1C0AF512F2115B161E10970F00123A142C174C069410DF09720D77150B13B0168E0A0F102E087F11131A300C08138F1D840B3013540D2F104817040B2912E4148E125B12EB0B7816461917110613470C50086C097E0FF91B160717155A10970DF408860AE8127511C40730152C175F0F3B17450A7108310965068D153F053107BB0D22145A0BC6063216600E0E11EB1C4315F015A21319102710210C5710620AE109D40CE708100CED146E0E7D0E4217F005D610760AE80C7713B0194B1AA60FC512B717FD0DBF037317A81275086C0D3505C90E1416C915A80BCD094B0C010BF4095E13B614D0024D07A714DD1319133A0E49078D07E81EAB0D7E12130F4F07C712C40CD30EE617E307020B6B05600FF30EBF17F612820B850E0E126803AE15320ADA04F70F481E2F09290E3C0FDF1625135A10970C0F10B7104818B40DD90A370C2910A40944150B1113135418801162142C1F42131219E81A23104E06B51A300DD30EA400CA0794052A0B850E971B641090147B111A0BD4147B0745048D0DD90AE80AD30F480A5D0E9E0E420E2F08E20F7D0B3C0F8A1D4912A308D50B0F0B501183023A1B9F1525124E189A1D7716870DF40C8B1BFB0F48102717730C0F12C40A1C0AAD1D080E1B10FF10BE158E0C7E101A0D5013DE14D7026004E2167A194B088609301A441C641D0E119613A3139509DB0FD2114E0EB8175913E4134715250CED111311DE159B18EF01120A63197913A9158816941446167A130C1219081008F518790979050B0EB814EA01180C9F0CE714610BE70DD91A30150B031109920C49130C0BC00E6916B5148113950CBF0C9F0F4F0F4F09CE13A91A100CAC0F970CF4175F0C501B3D0D6A0C5D111A137B0A560F901AA01A860D080AF5109D0C4914810DD912340CE712BD146112410F8A0F90101416F00DED16811759161812A31FCC0A7116AE136E12CA14FE186C1440153F1326093E0E420DA5091007A7177A10C40E2F1C92059319A019E209651E0814EA0525106F0F28148E07E819651A0313F115C30BB911EB12D70EC508FC0E9703F10E6914330DED119D0A370C64074514530F4810
340EE60F4209E10C9812B71134132D0F0D1034132D14A2147404EF0C29171709A0137523061B3708AC02EB0F5C0F481412155A0DAB0F48073E137B0F3B1993149514B61681132D10B7122010A417802DA62FA54E4A4FAC71B99B95D6CDFAEDFFFFF2F6DB00B37F8B5A652E3F8B343324D02BA020BE21EC1C0824261AE115320D981FE610AA1ABA0DC6176D139C17040BD410140D771A931A101A0912B70F5509A007EF10CB130C06240F1418E9149C06D513F810060EFA112D11550C5D0BB315FE194B1375110D0A290FE614AF153213DE1E350BDA1A231B021241134016600E070EC508C10A630CAC0C49112D12751E150FB108FC1BB30FDF16040C4311DE11BE0DB910D81AE10D5D0E8A069914E4076B14F10C8B0E3C0D8B0EB20F2E119D130C100D0B920F900CFA0AAD101A1006193114AF0BD4098515C30D221412135A18C100000AD304C31A3E13820FBE133A1581101A15A8021916D6095112F81C8B08F5175216CF0AA6158E0F7612FF0F631A1606B5121913A316660DBF15C911060474144D11F911F215A80C4303BB107613FE17B507A711B715AF0E84031115EA0559122007A119650E9110FF0DF412C407D4127517E3138F10B110B110B110B10000000000000000000000000000000000000000000000000000000000000000 \ No newline at end of file diff --git a/test.py b/test.py new file mode 100644 index 0000000..12e7fde --- /dev/null +++ b/test.py @@ -0,0 +1,10 @@ +from pandas import DataFrame +import matplotlib.pyplot as plt + +Data = {'Year': [1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010], + 'Unemployment_Rate': [9.8, 12, 8, 7.2, 6.9, 7, 6.5, 6.2, 5.5, 6.3] + } + +df = DataFrame(Data, columns=['Year', 'Unemployment_Rate']) +df.plot(x='Year', y='Unemployment_Rate', kind='line') +plt.show() \ No newline at end of file diff --git a/timeseries/timeseries.py b/timeseries/timeseries.py index e0ba981..ebd724f 100644 --- a/timeseries/timeseries.py +++ b/timeseries/timeseries.py @@ -54,3 +54,19 @@ def from_filterbank(self, filterbank_object): self.timeseries = time_series return Timeseries(time_series) + + def from_filterbank_data(self, filterbank_data): + """ + Initializes timeseries object from filterbank data, + while on the fly converting all 'combined' channels into + one summed intensity plot. + """ + time_series = [] + + for sample_set in filterbank_data: + summed_sample = np.sum(sample_set) + time_series.append(summed_sample) + + self.timeseries = time_series + + return Timeseries(time_series) diff --git a/write_filterbank_data_readable.py b/write_filterbank_data_readable.py new file mode 100644 index 0000000..f5915da --- /dev/null +++ b/write_filterbank_data_readable.py @@ -0,0 +1,37 @@ +import filterbank.filterbank as filterbank +import filterbank.header as header +import filterbank.filterbank as filterbank + +f = open("filReadable.txt", "w+") +fb = filterbank.Filterbank(filename='./pspm32.fil', read_all=True) + +#print(fb.next_row()) + + + + + +f.write('header: \n') +f.write(str(fb.get_header()) + '\n') +f.write('setup_channels: \n') +f.write(str(fb.setup_chans()) + '\n') +f.write('number of channels:\n') +f.write(str(fb.n_chans) + '\n') +f.write('number of samples: \n') +f.write(str(fb.n_samples) + '\n') +f.write('frequencies: \n') +i = 0 +for freq in fb.get_freqs(): + f.write(str(i) + ': ' + str(freq) + '\n') + i += 1 +f.write('data: \n') +i = 0 +for data in fb.data: + f.write(str(i) + ': ' + str(data) + '\n') + i += 1 + +f.close() +# doesnt work: +# for iterator in range(len(fb.get_freqs())): +# f.write(str(fb.get_freqs()[iterator]) + '\n') +# f.write(str(fb.data()[iterator]) + '\n')
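
A minimal sketch of how the pieces added in this diff chain together (it assumes a local pspm32.fil file; Filterbank, apply_harmonic_summing and Timeseries.from_filterbank_data are used as shown in pipeline.py and seek.py above):

    from filterbank.filterbank import Filterbank
    from harmonic_summing import apply_harmonic_summing
    from timeseries.timeseries import Timeseries

    # read the whole filterbank file and split it into frequency labels + samples
    fb = Filterbank(filename='./pspm32.fil', read_all=True)
    freqs, fil_data = fb.select_data()

    # fold 5 lower and 5 upper harmonics onto the fundamental, with a 0.1% match tolerance
    freqs, summed = apply_harmonic_summing(frequencies=freqs, fil_data=fil_data,
                                           precision=0.001, num_lo_up_harmonics=(5, 5))

    # collapse every sample row into one summed intensity value (time series)
    time_series = Timeseries().from_filterbank_data(summed)
    print(time_series.timeseries[:10])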