import matplotlib.patches as patches
from scipy.spatial import ConvexHull
from astropy.io import fits, ascii
import astropy
import gc
import numpy as np
import os
import time
import copy
import gzip
import sys
import pickle
import shutil
import statistics as stats
import glob
from multiprocessing import Pool
from contextlib import closing
from matplotlib import path
from scipy.interpolate import interp1d
from astropy import units as u
import astropy.coordinates as ac
#from shapely.geometry import Point, Polygon
np.set_printoptions(threshold=sys.maxsize)
from astropy.table import Table, vstack, hstack, Column, join

import sf_special_utils as sf_utils
import python_idl_funcs as pidl_funcs
import inside_polygon_check as sf_polycheck


def sf_extract_photons(obsid_list, save_fits=True, save_as_ascii=False):
    '''
    Match X-ray events to a given set of sources acquired by other means.
    A match is made when the X-ray photon falls inside a rectangular box of a
    given size drawn around the source. Relies on the double-histogram method
    to perform fast 2-dimensional coordinate matching.

    The sources file contains the ra, dec and id created after reading in
    ra_dec_input.
    '''
    PROC_NUM = 4
    global obsidsglob
    global save_as_asciis
    global sources_id
    global sources_ra
    global sources_dec
    save_as_asciis = save_as_ascii
    obsidsglob = obsid_list
    nfiles = len(obsid_list)
    with closing(Pool(processes=PROC_NUM)) as pool:
        # Multiprocessing: each worker handles one obsid.
        resu = pool.map(stacked_event_per_obsid, np.arange(0, nfiles))
        pool.terminate()
    final_stack = []
    print("extracted, now appending...")
    for ele in resu:
        if isinstance(ele, astropy.table.table.Table):
            final_stack.append(ele)
    print("appending done, now sorting...")
    resu = 0
    final_stack = vstack(final_stack)
    final_stack.sort('iid')
    n_matched = len(np.unique(final_stack["iid"].data))
    print("Number of matched sources", n_matched)
    print("sorting done, now storing...")
    if save_fits:
        hdu = fits.BinTableHDU(data=final_stack)
        hdu.header["BOXSIZE"] = boxsize
        hdu.writeto(extraction_interm, overwrite=True)
        inlist = final_stack["iid"].data
        out = np.unique(inlist)
        # Find the input sources that received no matches:
        missing_ids = np.array(list(set(sources_id) - set(out)))
        cnt_miss = len(missing_ids)
        if cnt_miss > 0:
            hdul = fits.open(extraction_interm)
            final_stack = Table(hdul[1].data)
            hdul = 0
            final_stack.remove_column('status')
            colnames = final_stack.colnames
            # Parse the column data types out of the table dtype string:
            dtyp = str(final_stack.dtype)
            dtyp = dtyp[1:-1]
            dtyp = dtyp.split('),')
            data_types = []
            for i in dtyp:
                bleh = i.split("', '")[1]
                data_types.append(bleh.split("'")[0])
        final_stack.sort('iid')
        hdu = fits.BinTableHDU(data=final_stack)
        hdu.header["BOXSIZE"] = boxsize
        hdu.writeto(extraction_stacked_file, overwrite=True)
        print("missing-source check done, file stored")
        os.remove(extraction_interm)
        return final_stack
    else:
        print("no sources or objects found, or overall fits file not written")
        return


def stacked_event_per_obsid(n):
    '''
    Multiprocessing worker - extracts matched photons for a single obsid.
    '''
    obsid = obsidsglob[n]
    get_link = np.where(DIR_OBSID_LIST == obsid)[0][0]
    search_name = LINK_LIST[get_link]
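    # Worker overview (expanded in the numbered steps below): Step 1 reads
    # this obsid's event list and converts coordinates; Step 2 histograms the
    # photons with bin size equal to boxsize; Step 3 maps each source to its
    # histogram bin plus the three adjacent bins; Steps 4-5 collect the
    # photons within boxsize/2 of each source, tracking photons matched to
    # more than one source separately.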
    event_file = search_name + '/analysis/' + str(obsid) + '_clean_evt2.fits'
    print('Processing ' + str(obsid))
    hdul = fits.open(event_file)
    event_head = hdul[1].header
    obs_date = event_head["DATE-OBS"]
    obs_date = obs_date.split("-")[0]
    obs_date = int(obs_date[2:]) - 1
    if obs_date > 66 or obs_date < 0:
        obs_date = 0
    flux_from_soft = CYC_INFO["soft_to_flux"][obs_date]
    flux_from_hard = CYC_INFO["hard_to_flux"][obs_date]

    # Step 1: Gather all required data, performing conversions as necessary.
    t4 = time.time()
    event_data = Table(hdul[1].data)
    event_data_colnames = event_data.colnames
    try:
        photon_ra = event_data['RA'].data
        photon_dec = event_data['DEC'].data
        photon_x, photon_y = pidl_funcs.ad2xy(event_head, photon_ra, photon_dec)
    except KeyError:
        # No RA/DEC columns: take x, y from the observation and compute
        # ra, dec from them instead.
        photon_x = event_data['x'].data
        photon_y = event_data['y'].data
        photon_x -= 1.
        photon_y -= 1.
        photon_ra, photon_dec = pidl_funcs.xy2ad(event_head, photon_x, photon_y)

    # Keep only the sources that fall inside this observation's ACIS-I field
    # of view:
    c1 = ac.SkyCoord(sources_ra * u.degree, sources_dec * u.degree, frame="icrs")
    obs_innd = np.where(DIRECTORY_ADDRESS["obsid"].data == obsid)[0]
    coords = sf_polycheck.chandra_acis_i_fov(
        DIRECTORY_ADDRESS["rapnt"].data[obs_innd][0],
        DIRECTORY_ADDRESS["decpnt"].data[obs_innd][0],
        DIRECTORY_ADDRESS["roll"].data[obs_innd][0])
    p = path.Path(coords)
    all_points = np.array([sources_ra, sources_dec]).T
    source_ind_relevant = np.where(p.contains_points(all_points))[0]
    source_id_rel = sources_id[source_ind_relevant]
    source_ra_rel = sources_ra[source_ind_relevant]
    source_dec_rel = sources_dec[source_ind_relevant]
    # Now that we have the photon x/y limits, find the source positions in
    # the image. Since a matched photon can lie within half a boxsize of its
    # source, pad the min and max accordingly:
    sources_x, sources_y, source_ind_relevant = pidl_funcs.ad2xy(
        event_head, source_ra_rel, source_dec_rel, pos_limits=True,
        minims=[np.min(photon_x) - boxsize / 2., np.min(photon_y) - boxsize / 2.],
        maxims=[np.max(photon_x) + boxsize / 2., np.max(photon_y) + boxsize / 2.])
    source_id_rel = source_id_rel[source_ind_relevant]
    source_ra_rel = source_ra_rel[source_ind_relevant]
    source_dec_rel = source_dec_rel[source_ind_relevant]
    nsources = len(sources_x)
    if nsources == 0:
        return
    sources_x += 1
    sources_y += 1
    nphotons = len(photon_x)
    if nphotons == 0:
        return
    old_x = photon_x
    old_y = photon_y
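    # Sketch of the FOV pre-filter used above (hypothetical values): given
    # the ACIS-I field-of-view polygon from chandra_acis_i_fov, matplotlib's
    # Path.contains_points returns a boolean mask over the (ra, dec) pairs:
    #   p = path.Path([(0., 0.), (0., 1.), (1., 1.), (1., 0.)])
    #   p.contains_points([(0.5, 0.5), (2., 2.)])  ->  [True, False]
    # so only sources inside the observation footprint are kept.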
    photon_obsid = obsid
    # (An OBSID column lookup with fallbacks used to live here; within one
    # call of this worker the obsid is a single known value.)
    # Tag photons uniquely, binding them to their obsid (added again by MO):
    photon_tag = np.full(nphotons, 1.e-7)
    photon_tag *= (np.arange(nphotons) + 1)
    photon_tag += photon_obsid

    # Step 2: Histogram the photons using the desired box size. We can save
    # work by limiting the histogram to the min and max coordinates of the
    # sources, since photons outside that range will not be matched.
    hist_min = np.array([np.min(photon_x), np.min(photon_y)]) - boxsize
    hist_max = np.array([np.max(photon_x), np.max(photon_y)]) + boxsize
    hist_phot, rev_photons, edges = pidl_funcs.hist_nd(
        np.array([photon_x, photon_y]), hist_min, hist_max, [boxsize, boxsize])
    hist_size = np.array(hist_phot.shape)

    # Step 3: Each source is matched with a bin in the histogram, along with
    # the adjacent bins that are also near the source. The histogram bin size
    # equals the box size used for photon matching, so every photon close
    # enough to a source to match is either in the bin containing the source
    # or in one of the three bins adjacent to it.
    # First find the x and y distances of the sources from the origin of the
    # histogram (scaled so that 1 unit = 1 boxsize):
    source_dist_x = (sources_x - hist_min[0]) / boxsize
    source_dist_y = (sources_y - hist_min[1]) / boxsize
    # The whole part of this scaled distance is the index of the current bin:
    ind_bin_x = np.floor(source_dist_x)
    ind_bin_y = np.floor(source_dist_y)
    # The fractional part shows in which quadrant of the bin each source
    # sits; from it we compute the indices of the other 3 bins of interest:
    bin_offset_x = np.sign(source_dist_x - ind_bin_x - 0.5)
    bin_offset_y = np.sign(source_dist_y - ind_bin_y - 0.5)
    # The index of the bin containing each source:
    source_bin_index = (ind_bin_y * hist_size[0]) + ind_bin_x
    # Likewise the indices of the bins adjacent to each source, one array for
    # horizontal, one for vertical, and one for diagonal adjacency:
    source_bin_index_horizontal = source_bin_index + bin_offset_x
    source_bin_index_vertical = source_bin_index + (bin_offset_y * hist_size[0])
    source_bin_index_diagonal = source_bin_index_vertical + bin_offset_x

    # Step 4: Initializations for the matching loop.
    source_ind_match = np.full(nphotons, -1)
    photon_dist_x = np.zeros(nphotons)
    photon_dist_y = np.zeros(nphotons)
    sources_nphotons = np.zeros(len(sources_x))

    # Step 5: For each source, match the nearby photons that fall within a
    # box of side boxsize around the source. Photons matched to multiple
    # sources must be tracked separately.
    t4 = time.time()
    ind_dupmatches = np.array([])
    ind_dupmatched_sources = np.array([])
    dupmatch_xpos = np.array([])
    dupmatch_ypos = np.array([])
    for ind_source in np.arange(0, len(sources_x)):
        ind_photons = np.array([])
        # Get the indices of the 4 relevant bins from the source_bin_index
        # arrays (i.e. the containing bin, and the horizontally, vertically,
        # and diagonally adjacent bins).
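        # rev_photons follows IDL's HISTOGRAM REVERSE_INDICES convention: for
        # bin b, rev[rev[b]:rev[b+1]] lists the indices of the photons that
        # fell into bin b (empty when rev[b] == rev[b+1]). Hypothetical 3-bin
        # sketch with photon 0 in bin 0 and photons 1-3 in bin 2:
        #   rev = [4, 5, 5, 8, 0, 1, 2, 3]
        #   bin 0 -> rev[4:5] = [0];  bin 1 -> rev[5:5] = [];
        #   bin 2 -> rev[5:8] = [1, 2, 3]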
        bin_ind_c = int(source_bin_index[ind_source])
        bin_ind_h = int(source_bin_index_horizontal[ind_source])
        bin_ind_v = int(source_bin_index_vertical[ind_source])
        bin_ind_d = int(source_bin_index_diagonal[ind_source])
        # Get the indices of the photons in the bin containing the source:
        if rev_photons[bin_ind_c + 1] != rev_photons[bin_ind_c]:
            ind_photons = np.concatenate((ind_photons,
                rev_photons[rev_photons[bin_ind_c]:rev_photons[bin_ind_c + 1]]))
        # Do the same for the 3 adjacent bins:
        if rev_photons[bin_ind_h + 1] != rev_photons[bin_ind_h]:
            ind_photons = np.concatenate((ind_photons,
                rev_photons[rev_photons[bin_ind_h]:rev_photons[bin_ind_h + 1]]))
        if rev_photons[bin_ind_v + 1] != rev_photons[bin_ind_v]:
            ind_photons = np.concatenate((ind_photons,
                rev_photons[rev_photons[bin_ind_v]:rev_photons[bin_ind_v + 1]]))
        if rev_photons[bin_ind_d + 1] != rev_photons[bin_ind_d]:
            ind_photons = np.concatenate((ind_photons,
                rev_photons[rev_photons[bin_ind_d]:rev_photons[bin_ind_d + 1]]))
        n_photons = len(ind_photons)
        # If there are no photons in any of the bins, go to the next source.
        if n_photons == 0:
            continue
        ind_photons = np.unique(ind_photons).astype(int)
        pos_x = photon_x[ind_photons] - sources_x[ind_source]
        pos_y = photon_y[ind_photons] - sources_y[ind_source]
        dist_x = np.abs(pos_x)
        dist_y = np.abs(pos_y)
        sg_ind_ind_match = np.logical_and(dist_x <= boxsize / 2.,
                                          dist_y <= boxsize / 2.)
#print("ind_photons ra for source no:",ind_source, photon_ra[ind_photons]) #print("ind_photons dec for source no:",ind_source,photon_dec[ind_photons]) if np.any(sg_ind_ind_match): ind_match=ind_photons[sg_ind_ind_match] #print("ind_photons ra",photon_ra[ind_match]) #print("ind_photons dec",photon_dec[ind_match]) sources_nphotons[ind_source] = len(ind_match) #keep track of the photons associated with each source rch 11 june 2010 #handle photons that are already matched to a source dup_match = np.where(source_ind_match[ind_match]!=-1)[0] single_match = np.where(source_ind_match[ind_match]==-1)[0] if len(dup_match)>0: #print("yes duplicates for source no:",ind_source) ind_dupmatch = ind_match[dup_match] #print("ind dupmatch",photon_ra[ind_dupmatch],photon_dec[ind_dupmatch]) ind_source_col = np.full(len(ind_dupmatch),ind_source) dupmatch_posx = (pos_x[sg_ind_ind_match])[dup_match] dupmatch_posy = (pos_y[sg_ind_ind_match])[dup_match] ind_dupmatches=np.concatenate((ind_dupmatches,ind_dupmatch)) ind_dupmatched_sources=np.concatenate((ind_dupmatched_sources,ind_source_col)) dupmatch_xpos=np.concatenate((dupmatch_xpos,dupmatch_posx)) dupmatch_ypos=np.concatenate((dupmatch_ypos,dupmatch_posy)) if len(single_match)>0: source_ind_match[ind_match[single_match]]=ind_source photon_dist_x[ind_match[single_match]] = (pos_x[sg_ind_ind_match])[single_match] photon_dist_y[ind_match[single_match]] = (pos_y[sg_ind_ind_match])[single_match] ''' append multiple matches of photons to sources close enough done here in order not to reallocate arrays multiple times inside the loop above ''' #dupmatches_T=dupmatches.T #print("dupmatches",dupmatches,len(dupmatches)) n_dupmatches = len(ind_dupmatches)#int(len(dupmatches)/4) ind_dupmatches=ind_dupmatches.astype(int) ind_dupmatched_sources=ind_dupmatched_sources.astype(int) #add user-included fields to output structure #copy_cols issues #source_cols issues - not part of the code yet #print("1/3 of the way through step 5", time.time()-t4) #define an ascii table #find the indices of matched photons. match_ind=np.where(source_ind_match!=-1)[0] relevant_photon_ct=len(match_ind) if n_dupmatches>0: ind_including_dup = np.concatenate((match_ind, ind_dupmatches)) else: ind_including_dup = np.array(match_ind) ind_including_dup=ind_including_dup.astype(int) #We now populate stacked_event. 
    tab_ent = len(ind_including_dup)
    if tab_ent == 0:
        return
    desired_columns = ['time', 'ccd_id', 'node_id', 'expno', 'chipx', 'chipy',
                       'tdetx', 'tdety', 'detx', 'dety', 'x', 'y', 'pha',
                       'energy', 'pi', 'fltgrade', 'grade', 'status', 'obsid',
                       'ra', 'dec', 'old_x', 'old_y', 'evt_tag', 'iid',
                       'ira', 'idec']
    irrelevant_cols = set(event_data_colnames) - set(desired_columns)
    stacked_event = event_data[ind_including_dup]
    stacked_event.remove_columns(list(irrelevant_cols))
    stat = stacked_event['status'].data
    if save_as_asciis:
        # Flatten the bit-array status column into one string per row:
        stat_str = str(stat)
        stat_str = stat_str.replace("[ ", "")
        stat_str = stat_str.replace("[", "")
        stat_str = stat_str.replace("]", "")
        stat_str = stat_str.replace("\n  ", " ")
        stat_str_many = stat_str.split("\n ")
        stacked_event.remove_column('status')
        stat_str_col = Column(stat_str_many, name='status')
        stacked_event.add_column(stat_str_col)
        # To reanimate a boolean column later:
        #   a = np.array(['True', 'False', 'True', 'False'])
        #   a == "True"   ->   array([ True, False,  True, False])
    # Add the desired columns that the event table does not already carry:
    relevant_cols = set(desired_columns) - set(event_data_colnames)
    stacked_event_2 = Table(
        np.zeros(tab_ent * len(relevant_cols)).reshape(tab_ent, len(relevant_cols)),
        names=list(relevant_cols))
    stacked_event = hstack([stacked_event, stacked_event_2])

    # II - Update the unique photons: fill in the photon information.
    stacked_event['evt_tag'][0:relevant_photon_ct] = photon_tag[match_ind]
    # Everything else needs photon-by-photon info, but obsid is the same for
    # every photon within one call of this worker:
    stacked_event['obsid'][0:relevant_photon_ct] = int(photon_obsid)
    stacked_event['old_x'][0:relevant_photon_ct] = old_x[match_ind]
    stacked_event['old_y'][0:relevant_photon_ct] = old_y[match_ind]
    stacked_event['ra'][0:relevant_photon_ct] = photon_ra[match_ind]
    stacked_event['dec'][0:relevant_photon_ct] = photon_dec[match_ind]
    # Fill in the source information:
    stacked_event['iid'][0:relevant_photon_ct] = source_id_rel[source_ind_match[match_ind]]
    stacked_event['ira'][0:relevant_photon_ct] = source_ra_rel[source_ind_match[match_ind]]
    stacked_event['idec'][0:relevant_photon_ct] = source_dec_rel[source_ind_match[match_ind]]
    # Overwrite the x and y coordinates with those relative to the source the
    # photon is matched with:
    stacked_event['x'][0:relevant_photon_ct] = photon_dist_x[match_ind]
    stacked_event['y'][0:relevant_photon_ct] = photon_dist_y[match_ind]

    # III - Update the duplicated photons.
    if n_dupmatches > 0:
        size_stacked_event = int(relevant_photon_ct) + n_dupmatches
        # Update the photon information:
        stacked_event["evt_tag"][relevant_photon_ct:size_stacked_event] = photon_tag[ind_dupmatches]
        stacked_event["old_x"][relevant_photon_ct:size_stacked_event] = old_x[ind_dupmatches]
        stacked_event["old_y"][relevant_photon_ct:size_stacked_event] = old_y[ind_dupmatches]
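        # Layout note: rows [0:relevant_photon_ct] hold each photon's first
        # (unique) match; the rows filled in here repeat photons that also
        # fell inside the box of additional sources, so one physical event
        # can appear once per matched source.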
        stacked_event["ra"][relevant_photon_ct:size_stacked_event] = photon_ra[ind_dupmatches]
        stacked_event["dec"][relevant_photon_ct:size_stacked_event] = photon_dec[ind_dupmatches]
        # Update the source information:
        stacked_event["iid"][relevant_photon_ct:size_stacked_event] = source_id_rel[ind_dupmatched_sources]
        stacked_event["ira"][relevant_photon_ct:size_stacked_event] = source_ra_rel[ind_dupmatched_sources]
        stacked_event["idec"][relevant_photon_ct:size_stacked_event] = source_dec_rel[ind_dupmatched_sources]
        stacked_event["x"][relevant_photon_ct:size_stacked_event] = dupmatch_xpos
        stacked_event["y"][relevant_photon_ct:size_stacked_event] = dupmatch_ypos
    stacked_event.sort('iid')
    stacked_event.add_column(flux_from_soft, name="flux_from_soft")
    stacked_event.add_column(flux_from_hard, name="flux_from_hard")
    return stacked_event


def sf_new_prepare_data(sf_calc=True, set_bg_files=True, set_bg_fileh=True,
                        set_rmax=True, set_badid=True, fits_file_name=None):
    '''
    Prepare the stacked-event data for the later analysis stages.

    Open questions carried over from the port: sf_new_prepare_data seems to
    call sf_obs_exposure with stack_out and exp_out as output, but those
    variables are not used in sf_obs_exposure, which seems to want a fits
    file as input - what is the form of that output? It also needs checking
    whether the path_mys work correctly, what badstar etc. are, and how the
    multiprocessing part behaves.
    '''
    global boxsize, flag_all, source_id, ra, dec, x, y, old_x, old_y, energy, \
        source_ra, source_dec, ph_obsid, test_obsid
    boxsize = float(outp_boxsize.value)
    sky_name = '/analysis/057.exp.bin1.fits.gz'
    det_name = '/analysis/exp.bin1.fits.gz'
    # Port of IDL's n_elements default; a scalar float never has __len__, so
    # this currently always resets boxsize to 100:
    if not hasattr(boxsize, '__len__'):
        boxsize = 100.
    # R90_FRACS default (disabled): [.25, .5, .75, 1., 1.25, 1.5]
    # Where are bg_files, bg_fileh, rmax, badid supposed to come from?
    badid = BAD_OBS
    # *** WHAT ARE THE BG_FILEH NAMES? ***
    good_bg_fileh_names = []
    # (A disabled block used to read the pointing file here and append to
    # badid every obsid whose sky/det exposure maps failed to open; the bad
    # ids are now taken from BAD_OBS instead, so ids need not be tested again.)
    if not set_rmax:
        rmax = 100.
    if fits_file_name is not None:
        stack_file = fits_file_name
    else:
        stack_file = extraction_stacked_file
    hdul = fits.open(stack_file)
    event_head = hdul[1].header
    boxsize = float(outp_boxsize.value)  # or event_head['BOXSIZE']
    stacked_event = hdul[1].data
    right_inds = np.where(stacked_event['time'].flatten() > 0)[0]
    stacked_event = stacked_event[right_inds]
    source_id, ra, dec, x, y, old_x, old_y, energy, source_ra, source_dec, ph_obsid = \
        stacked_event['iid'], stacked_event['ra'], stacked_event['dec'], \
        stacked_event['x'], stacked_event['y'], stacked_event['old_x'], \
        stacked_event['old_y'], stacked_event['energy'], stacked_event['ira'], \
        stacked_event['idec'], stacked_event['obsid']
    test_obsid = np.unique(ph_obsid)
    # Ignore the obsids of duplicates:
    zero_obs = np.where(test_obsid > 0)[0]
    test_obsid = test_obsid[zero_obs]
    # id0 is the index of the *first* photon in the list associated with each
    # source id:
    sorted_id, id0 = np.unique(source_id, return_index=True)
    # (succ_diff in the IDL original is the same as np.diff.)
    nphv = np.concatenate((np.diff(id0), np.array([len(source_id) - id0[-1]])))
    # Set the number of photons to zero where there are none (energy == 0):
    e0 = np.where(energy[id0] == 0)[0]
    nphv[e0] = 0
    # Get the id vector for each input:
    idv = source_id[id0]
    # If the exposure calculation has already been done, it can be skipped:
    if sf_calc:
        sf_call_calc_exp(idv, id0, boxsize, source_id, source_ra, source_dec,
                         sky_name, det_name, stacked_event['flux_from_soft'],
                         stacked_event['flux_from_hard'], ph_obsid,
                         True, True, exp_file)
    # Nothing beyond sf_calc is needed when doing a split-samples full run:
    if flag_all:
        mdist = np.full(len(ra), -99.)
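        # mdist is each photon's source off-axis distance in arcmin for the
        # matching observation; -99. marks entries never filled in by
        # calculate_mdists.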
        PROC_NUM = 4
        with closing(Pool(processes=PROC_NUM)) as pool:
            resu = pool.map(sf_utils.calculate_mdists,
                            np.ndarray.tolist(np.arange(len(test_obsid))))
        for mem in resu:
            mdist[mem[1]] = mem[0]
        dist = np.sqrt(x ** 2 + y ** 2)
        r90 = sf_utils.calculate_r90(mdist)  # 1.5 + 9. * (mdist / 10.)**1.9
        bad = np.zeros(len(mdist))
        if len(badid) > 0:
            for i in np.arange(len(badid)):
                bad_photon_indices = np.where(ph_obsid == badid[i])[0]
                if len(bad_photon_indices) > 0:
                    bad[bad_photon_indices] = 1
        with open(prepare_file_name, 'wb') as f:
            pickle.dump([stack_file, boxsize, source_id, ra, dec, x, y, old_x,
                         old_y, energy, source_ra, source_dec, mdist, r90,
                         badid, bad, dist, id0, idv, nphv, ph_obsid,
                         stacked_event['flux_from_soft'],
                         stacked_event['flux_from_hard']], f)
    return


def sf_call_calc_exp(idv, id0, boxsize, source_id, source_ra, source_dec,
                     sky_name, det_name, flux_conv_soft, flux_conv_hard,
                     ph_obsid, set_bg_files, set_bg_fileh, exp_file):
    idv = idv.astype(float)
    input_file = ascii.read(pointing_file_name)
    obsid, obs_ra, obs_dec, obs_roll = input_file["obsid"].data, \
        input_file["rapnt"].data, input_file["decpnt"].data, \
        input_file["roll"].data
    # Force deallocation of the read file:
    input_file = 0
    # Get the vectors that say which sources and observations correspond to
    # each other:
    source_obs_ind, source_src_id, source_src_ra, source_src_dec, \
        source_obs_obs, source_obs_mdist = sf_new_match_source_obs(
            idv, source_id[id0], source_ra[id0], source_dec[id0],
            obsid, obs_ra, obs_dec, obs_roll, boxsize)
    # ADG 6/10/14: pixel size + HRMA at 4 keV.
    source_obs_r90 = sf_utils.calculate_r90(source_obs_mdist)
    # Convert r90 to pixels and apply the 3-pixel floor:
    source_obs_srcrad_in = source_obs_r90 / 0.492
    source_obs_srcrad = np.clip(source_obs_srcrad_in, 3., None)
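    # (0.492 arcsec is the ACIS pixel scale, so dividing an r90 given in
    # arcseconds by 0.492 converts it to detector pixels; the same constant
    # appears throughout this module.)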
    # Set up the variance arrays for both sky and detector exposure (as a
    # copy, so the two do not alias each other):
    source_obs_exp_sky_var = np.zeros((len(source_obs_obs), len(R90_FRACS)))
    source_obs_exp_det_var = source_obs_exp_sky_var.copy()
    # Set up the multitasking; exp_map_sky is the path, and the aperture
    # photometry for the sky and detector maps is done in a single pass:
    source_obs_exp_sky, source_obs_exp_sky_var, max_sky_exp, \
        source_obs_exp_det, source_obs_exp_det_var, source_obs_bgs, \
        max_det_exp, source_id_block = sf_calc_exp_multiprocess(
            obsid, source_obs_obs, source_src_ra, source_src_dec,
            source_src_id, source_obs_srcrad, sky_name, det_name)
    # As there is no difference between bgh and bgs:
    source_obs_bgh = source_obs_bgs
    # Find where the unique obsids are:
    obs_uniq, obs_ind_uniq_ind = np.unique(ph_obsid, return_index=True)
    flux_c_soft = np.full(len(source_obs_obs), 1.0)
    flux_c_hard = np.full(len(source_obs_obs), 1.0)
    for obsi, indo in zip(obs_uniq, obs_ind_uniq_ind):
        indd = np.where(source_obs_obs == obsi)[0]
        flux_c_soft[indd] = flux_conv_soft[indo]
        flux_c_hard[indd] = flux_conv_hard[indo]
    # The output is slightly changed relative to the IDL version because
    # uneven column lengths cannot be saved in Python the way they can in
    # IDL:
    big_list = [source_obs_obs, max_det_exp, source_obs_ind, source_id_block,
                source_obs_mdist, source_obs_r90, source_obs_srcrad,
                source_obs_exp_sky, source_obs_exp_det, source_obs_bgh,
                source_obs_bgs, source_obs_exp_sky_var,
                source_obs_exp_det_var, flux_c_soft, flux_c_hard]
    t = Table(big_list,
              names=['source_obs_obs', 'max_det_exp', 'source_obs_ind',
                     'source_id', 'source_obs_mdist', 'source_obs_r90',
                     'source_obs_srcrad', 'source_obs_exp_sky',
                     'source_obs_exp_det', 'source_obs_bgh', 'source_obs_bgs',
                     'source_obs_exp_sky_var', 'source_obs_exp_det_var',
                     'flux_c_soft', 'flux_c_hard'])
    t.write(exp_file, overwrite=True, format='fits')
    return


def sf_calc_exp_multiprocess(obsid, source_obs_obs, source_ra, source_dec,
                             source_id, r90, path_skys, path_dets):
    """
    Set up multiprocessing for sf_calc_exp.
    """
    PROC_NUM = 4
    multiPROC_NUM = 300
    global source_obs_obsss
    global source_rass
    global source_decss
    global source_idss
    global r90ss
    global R90_FRACSss
    global path_sky
    global path_det
    source_obs_obsss, source_rass, source_decss, path_sky = \
        source_obs_obs, source_ra, source_dec, path_skys
    source_idss, r90ss, R90_FRACSss, path_det = \
        source_id, r90, R90_FRACS, path_dets
    txrbnow = time.time()
    # Process the obsids in chunks of multiPROC_NUM. The end of the final
    # chunk is always closed at len(obsid), so no trailing obsids are dropped
    # when the length divides multiPROC_NUM evenly:
    ind_limits = np.arange(0, len(obsid), multiPROC_NUM)
    ind_limits = np.concatenate((ind_limits, np.array([len(obsid)])))
    with closing(Pool(processes=PROC_NUM)) as pool:
        for i in np.arange(0, len(ind_limits) - 1):
            arg_appro = obsid[ind_limits[i]:ind_limits[i + 1]]
            resu = pool.map(sf_calc_exp_four_at_once,
                            np.ndarray.tolist(arg_appro))
            resu = np.array(resu)
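            # Each worker returns eight per-chunk arrays (sky/det exposures,
            # variances, max exposures, and a source-id block). A worker only
            # fills the rows of the sources it owns and leaves zeros
            # elsewhere, so summing over workers (and accumulating over
            # chunks below) merges the results without collisions.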
            exp_sky_small, var_sky_small, max_exp_sky_small, \
                exp_det_small, var_det_small, exp_det_non_avg_small, \
                max_exp_det_small, source_id_block_small = \
                resu[:, 0], resu[:, 1], resu[:, 2], resu[:, 3], \
                resu[:, 4], resu[:, 5], resu[:, 6], resu[:, 7]
            if i == 0:
                exp_sky = np.sum(exp_sky_small, axis=0)
                var_sky = np.sum(var_sky_small, axis=0)
                max_exp_sky = np.sum(max_exp_sky_small, axis=0)
                exp_det = np.sum(exp_det_small, axis=0)
                var_det = np.sum(var_det_small, axis=0)
                exp_det_non_avg = np.sum(exp_det_non_avg_small, axis=0)
                max_exp_det = np.sum(max_exp_det_small, axis=0)
                source_id_block = np.sum(source_id_block_small, axis=0)
            else:
                exp_sky += np.sum(exp_sky_small, axis=0)
                var_sky += np.sum(var_sky_small, axis=0)
                max_exp_sky += np.sum(max_exp_sky_small, axis=0)
                exp_det += np.sum(exp_det_small, axis=0)
                var_det += np.sum(var_det_small, axis=0)
                exp_det_non_avg += np.sum(exp_det_non_avg_small, axis=0)
                max_exp_det += np.sum(max_exp_det_small, axis=0)
                source_id_block += np.sum(source_id_block_small, axis=0)
        pool.terminate()
    max_exp_sky = (max_exp_sky.T)[0]
    max_exp_det = (max_exp_det.T)[0]
    source_id_block = (source_id_block.T)[0]
    print("time taken to calculate current source_exp:", time.time() - txrbnow)
    return exp_sky, var_sky, max_exp_sky, exp_det, var_det, \
        exp_det_non_avg, max_exp_det, source_id_block


def sf_calc_exp_four_at_once(obsid):
    global path_skyo
    try:
        len(obsid)
    except TypeError:
        obsid = np.array([obsid])
    # One (n_source_obs, n_r90_fracs) array per quantity:
    shape = (len(source_obs_obsss), len(R90_FRACSss))
    source_id_exp = np.zeros(shape)
    source_exp_sky = np.zeros(shape)
    source_var_sky = np.zeros(shape)
    max_exposure_sky = np.zeros(shape)
    source_exp_det = np.zeros(shape)
    source_var_det = np.zeros(shape)
    max_exposure_det = np.zeros(shape)
    source_exp_det_non_avg = np.zeros(shape)
    for i in np.arange(len(obsid)):
        get_link = np.where(DIR_OBSID_LIST == obsid[i])[0][0]
        search_name = LINK_LIST[get_link]
        path_skyo = search_name + path_sky
        path_deto = search_name + path_det
        # Get the indices of the sources inside this observation:
        current_source_indices = np.where(source_obs_obsss == obsid[i])[0]
        source_ct = len(current_source_indices)
        if source_ct == 0:
            continue
        # Read the sky exposure map header and data:
        hdul = fits.open(path_skyo)
        obs_header = hdul[0].header
        exposure_map = hdul[0].data
        hdul = 0
        # And the same for the detector exposure map:
        hdul = fits.open(path_deto)
        obs_header_det = hdul[0].header
        exposure_map_det = hdul[0].data
        hdul = 0
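        # astropy.io.fits opens .gz-compressed files transparently, so the
        # exposure maps can be read directly without an explicit gzip.open.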
        source_x, source_y = pidl_funcs.ad2xy(
            obs_header, source_rass[current_source_indices],
            source_decss[current_source_indices])
        source_x_det, source_y_det = pidl_funcs.ad2xy(
            obs_header_det, source_rass[current_source_indices],
            source_decss[current_source_indices])
        src_cnt_ind = 0
        for source_ind in current_source_indices:
            # Aperture radii for every requested r90 fraction:
            rad = R90_FRACSss * r90ss[source_ind]
            exp, errexp, exp_var = sf_utils.stackfast_aper(
                exposure_map, source_x[src_cnt_ind], source_y[src_cnt_ind],
                sky=0., skyerr=0., phpadu=1., apr=rad, skyrad=0.,
                badpix=[0., 0.], setskyval=0)
            # Mean sky exposure per unit aperture area:
            source_exp_sky[source_ind, :] = exp / (np.pi * rad ** 2)
            source_var_sky[source_ind, :] = exp_var
            max_exposure_sky[source_ind, :] = obs_header["EXPOSURE"]
            source_id_exp[source_ind, :] = source_idss[source_ind]
            exp, errexp, exp_var = sf_utils.stackfast_aper(
                exposure_map_det, source_x_det[src_cnt_ind],
                source_y_det[src_cnt_ind], sky=0., skyerr=0., phpadu=1.,
                apr=rad, skyrad=0., badpix=[0., 0.], setskyval=0)
            source_exp_det[source_ind, :] = exp / (np.pi * rad ** 2)
            source_var_det[source_ind, :] = exp_var
            source_exp_det_non_avg[source_ind, :] = exp
            max_exposure_det[source_ind, :] = obs_header["EXPOSURE"]
            src_cnt_ind += 1
        exposure_map = 0
        exposure_map_det = 0
    return source_exp_sky, source_var_sky, max_exposure_sky, \
        source_exp_det, source_var_det, source_exp_det_non_avg, \
        max_exposure_det, source_id_exp


def sf_new_match_source_obs(source_id, source_id_id0, source_ra, source_dec,
                            obs_id, obs_ra, obs_dec, obs_roll, boxsize):
    '''completed'''
    print('MATCH_SOURCE_OBS: starting the matching process..')
    # Output vectors:
    out_ind = np.array([])
    out_src_id, out_src_ra, out_src_dec = \
        np.array([]), np.array([]), np.array([])
    out_obsid = np.array([])
    out_mdist = np.array([])
    c1 = ac.SkyCoord(source_ra * u.degree, source_dec * u.degree, frame="icrs")
    for ra_now, dec_now, obs_roll_now, obs_id_now in zip(obs_ra, obs_dec,
                                                         obs_roll, obs_id):
        coords = sf_polycheck.chandra_acis_i_fov(ra_now, dec_now, obs_roll_now)
        p = path.Path(coords)
        c2 = ac.SkyCoord(ra_now * u.degree, dec_now * u.degree, frame="icrs")
        dist_ = 60. * c1.separation(c2).value  # arcmin
        all_points = np.array([source_ra, source_dec]).T
        is_point_in_poly = np.where(p.contains_points(all_points))[0]
        out_ind = np.concatenate((out_ind, is_point_in_poly))
        out_obsid = np.concatenate((out_obsid,
                                    np.full(len(is_point_in_poly), obs_id_now)))
        out_mdist = np.concatenate((out_mdist, dist_[is_point_in_poly]))
        out_src_id = np.concatenate((out_src_id,
                                     source_id_id0[is_point_in_poly]))
        out_src_ra = np.concatenate((out_src_ra, source_ra[is_point_in_poly]))
        out_src_dec = np.concatenate((out_src_dec,
                                      source_dec[is_point_in_poly]))
    # Sort everything by source id:
    out_ind = out_ind.astype(int)
    id_ind = np.argsort(source_id[out_ind])
    out_ind = out_ind[id_ind]
    out_src_id = out_src_id[id_ind]
    out_src_ra = out_src_ra[id_ind]
    out_src_dec = out_src_dec[id_ind]
out_src_dec=out_src_dec[id_ind] #print("out_ind",out_ind) out_obsid = out_obsid[id_ind].astype(int) out_mdist = out_mdist[id_ind] #print("out_ind,out_obsid,out_mdist",out_obsid,out_mdist) print('MATCH_SOURCE_OBS: returning..') #for z,n in zip(out_src_id,out_obsid): # print(z,n) return out_ind,out_src_id,out_src_ra,out_src_dec,out_obsid,out_mdist #set parameter routines def sf_new_set_parameters(create_srcimages=False,run_background=True,\ softe_new=np.array([500.,2000.]),harde_new=np.array([2000., 7000.]),\ mdistmax=1e9,lim=np.array([-1,200]),\ r90_frac=1.,min_exp_frac=0.0): ''' incomplete: where do these variables comes from use_bgh, ~bad, exclude_list? also what are we saving? save the variables i need do_stack also where were all the mdistmax, bad etc coming from? in exp_out: obsid, max_det_exp, source_obs_ind, source_obs_obs, source_obs_mdist, $ source_obs_r90, source_obs_srcrad, R90_FRACS, source_obs_exp_sky, $ source_obs_exp_det, source_obs_bgh, source_obs_bgs, $ source_obs_exp_sky_var, source_obs_exp_det_var, filename=exp_file in stack_out: stack_file,boxsize,id,ra,dec,x,y,old_x,old_y,energy,source_ra, $ source_dec, mdist, r90, badid, bad, dist, id0, idv, $ nphv, ph_obsid, filename=save_name ''' global boxsize boxsize=float(outp_boxsize.value) with open(prepare_file_name, 'rb') as f: stack_file,boxsize,source_id,ra,dec,x,y,old_x,old_y,\ energy,source_ra,source_dec,mdist,r90,badid,bad,dist,\ id0,idv,nphv,ph_obsid,i_flux_from_soft,i_flux_from_hard = pickle.load(f) use_bgh = len(exp_out["source_obs_bgh"].data)>1 use_bgs = len(exp_out["source_obs_bgs"].data)>1 use_exp = ~(use_bgs or use_bgh) if create_srcimages: experimental_sf_create_src_imgs(x,y,energy,source_id, idv,id0,exp_out["source_obs_ind"], exp_out["source_obs_obs"],sources_id,#sources_file["sources_id"], sources_ra,sources_dec,boxsize) #sources_file["sources_ra"],sources_file["sources_dec"], #boxsize) if run_background: i_srcph_bgaper,srcph_bgfrac,\ bg_in_srcaper_soft,bg_in_srcaper_hard,bg_in_srcaper_soft_flux,bg_in_srcaper_hard_flux=sf_perform_background(mdistmax,softe_new,harde_new,r90_frac) ''' ; ========= Run the old parameter output routine ============ Define three main array sizes, (photon-source, source-obs, source) and make blank arrays for each size''' n_photon_source_matches = len(x)#len(stack_out["x"].data) n_source_obs_matches = len(exp_out["source_obs_ind"].data) n_sources = len(id0)#len(stack_out["id0"].data) #most likely don't need these blank lists - delete after defining everything else photon_source_int_blank=np.zeros(n_photon_source_matches).astype(int) photon_source_float_blank=np.zeros(n_photon_source_matches) source_obs_double_blank=np.zeros(n_photon_source_matches) source_obs_int_blank=np.zeros(n_photon_source_matches).astype(int) source_int_blank=np.zeros(n_sources).astype(int) sources_float_blank=np.zeros(n_sources) obs_obsids=np.array(np.unique(ph_obsid)) srcph_srcids = source_id src_srcids = idv#stack_out["idv"].data srcph_obsids = ph_obsid#stack_out["obsid"].data srcobs_srcids = np.array(exp_out["source_id"])#idv[exp_out["source_obs_ind"].data] srcobs_obsids = np.array(exp_out["source_obs_obs"]) srcobs_soids=np.array([str(i)+" "+str(int(j)) for i,j in zip(srcobs_srcids,srcobs_obsids)]) srcph_soids = np.array([str(i,)+" "+str(int(j)) for i,j in zip(source_id,ph_obsid)]) ''' Index array initialization: sf_gen_indarrays is used to generate index arrays used to communicate data between source-lengt, srcobs-length, and srcph-length arrays ''' sp2s_sorted, sp2s_starts, 
    sp2s_sorted, sp2s_starts, sp2s_nums, sp2s_indices = \
        sf_gen_indarrays(srcph_srcids, src_srcids)
    sp2o_sorted, sp2o_starts, sp2o_nums, sp2o_indices = \
        sf_gen_indarrays(srcph_obsids, obs_obsids)
    so2s_sorted, so2s_starts, so2s_nums, so2s_indices = \
        sf_gen_indarrays(srcobs_srcids, src_srcids)
    so2o_sorted, so2o_starts, so2o_nums, so2o_indices = \
        sf_gen_indarrays(srcobs_obsids, obs_obsids)
    sp2so_sorted, sp2so_starts, sp2so_nums, sp2so_indices = \
        sf_gen_indarrays(srcph_soids, srcobs_soids)
    # Initialize the source-length data arrays, computed later:
    srcradv, r90v = np.zeros(n_sources), np.zeros(n_sources)
    srcareav, obsidv, mdistv = np.zeros(n_sources), np.zeros(n_sources), \
        np.zeros(n_sources)
    # Initialize the exposure-calculation arrays, computed later:
    src_exp_sky, src_exp_det, src_area_exp_sky = np.zeros(n_sources), \
        np.zeros(n_sources), np.zeros(n_sources)
    src_area_exp_det, src_bg_ctss, src_bg_cths = np.zeros(n_sources), \
        np.zeros(n_sources), np.zeros(n_sources)
    # Initialize the filter arrays, computed later:
    i_src_valid_mdist = np.zeros(n_sources).astype(int)
    i_srcph_non_excluded = photon_source_int_blank + 1
    i_src_idlist = np.zeros(n_sources).astype(int)
    # Compute srcrad, the source radius in pixels, from r90 (arcseconds):
    srcrad_in = (r90 * r90_frac) / 0.492
    # Set a hard minimum srcrad of 3 pixels (RCH 11/21/13):
    srcrad = np.clip(srcrad_in, 3, None)
    # Photons-per-source (nph), photons-per-source in the source aperture
    # (nph_src) and in the background aperture (nph_bg) are computed below at
    # source length and srcph length.
    i_srcph_srcaper = (dist <= srcrad)
    # Compute id_step, used for binning/drawing the histograms later:
    id_step = 0  # np.min(np.diff(idv))

    # Part 3: The specific aperture radius is applied to the exposure data;
    # if necessary, the r90 fractions are interpolated.
    # These hold the exposure information for srcobs pairs:
    srcobs_exp_sky = np.zeros(n_source_obs_matches)
    srcobs_exp_det = np.zeros(n_source_obs_matches)
    srcobs_bgh = np.zeros(n_source_obs_matches)
    srcobs_bgs = np.zeros(n_source_obs_matches)
    # And these store the variance in the exposure time:
    srcobs_exp_det_var = np.zeros(n_source_obs_matches)
    srcobs_exp_sky_var = np.zeros(n_source_obs_matches)
    # R90_FRACS is from make_stack_list; r90_frac is the requested fraction.
    # ind_frac indicates whether the exposure was precomputed for this r90 or
    # has to be interpolated:
    ind_frac = np.where(R90_FRACS == r90_frac)[0]
    if len(ind_frac) > 0:
        srcobs_exp_sky = exp_out["source_obs_exp_sky"][:, ind_frac]
        srcobs_exp_det = exp_out["source_obs_exp_det"][:, ind_frac]
        srcobs_exp_det_var = exp_out["source_obs_exp_det_var"][:, ind_frac]
        srcobs_exp_sky_var = exp_out["source_obs_exp_sky_var"][:, ind_frac]
        if use_bgh:
            srcobs_bgh = exp_out["source_obs_bgh"][:, ind_frac]
        if use_bgs:
            srcobs_bgs = exp_out["source_obs_bgs"][:, ind_frac]
    else:
        for i in np.arange(len(exp_out["source_obs_ind"])):
            source_obs_exp_interp = interp1d(R90_FRACS,
                                             exp_out["source_obs_exp_sky"][i, :])
            srcobs_exp_sky[i] = source_obs_exp_interp(r90_frac)
            source_obs_det_interp = interp1d(R90_FRACS,
                                             exp_out["source_obs_exp_det"][i, :])
            srcobs_exp_det[i] = source_obs_det_interp(r90_frac)
            if use_bgs:
                source_obs_bgs_interp = interp1d(R90_FRACS,
                                                 exp_out["source_obs_bgs"][i, :])
                srcobs_bgs[i] = source_obs_bgs_interp(r90_frac)
            if use_bgh:
                source_obs_bgh_interp = interp1d(R90_FRACS,
                                                 exp_out["source_obs_bgh"][i, :])
                srcobs_bgh[i] = source_obs_bgh_interp(r90_frac)
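    # When r90_frac is not one of the precomputed R90_FRACS grid points, the
    # per-pair exposures are linearly interpolated; e.g. (illustrative
    # values) interp1d([0.5, 1.0, 1.5], [10., 18., 24.])(0.8) -> ~14.8.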
    # Weight the background by the flux conversion:
    srcobs_bgs_flux = srcobs_bgs.reshape(-1) * exp_out["flux_c_soft"]
    srcobs_bgh_flux = srcobs_bgh.reshape(-1) * exp_out["flux_c_hard"]

    # Part 4: Generate the exposure filters. With the srcobs-length exposure
    # calculations we can now compute a few more filter arrays.
    if min_exp_frac == 0:
        igood_obs_exp = (srcobs_exp_det > 0).astype(int)
    else:
        igood_obs_exp = (srcobs_exp_det >
                         pidl_funcs.avoid_nans(min_exp_frac * exp_out["max_det_exp"],
                                               so2o_indices)).astype(int)
    igood_exp = (energy == 0).astype(int) + \
        (energy != 0).astype(int) * pidl_funcs.avoid_nans(igood_obs_exp,
                                                          sp2so_indices)
    # Check whether any of the exposures produced NaNs:
    inans = np.where(np.isnan(exp_out["source_obs_exp_sky"][:, 0]))[0]
    if len(inans) > 0:
        badobsids = srcobs_obsids[inans]
        badobsid_uniq = np.unique(badobsids)
        n_badobs = len(badobsid_uniq)
        for kk in np.arange(n_badobs):
            # Zero out the exposure for the bad obsids:
            ibadexp = np.where(srcph_obsids == badobsid_uniq[kk])[0]
            igood_exp[ibadexp] = 0
            # Zero out the counts for the bad obsids:
            i_srcph_srcaper[ibadexp] = 0
            i_srcph_bgaper[ibadexp] = 0
            ibadobs = np.where(srcobs_obsids == badobsid_uniq[kk])[0]
            igood_obs_exp[ibadobs] = 0
    # NOTE: the next few lines were lost in extraction; they are
    # reconstructed from how igood_obs_all, igood_all, src_nph, src_nph_src
    # and src_nph_bg are used further down.
    igood_obs_mdist = (exp_out["source_obs_mdist"].flatten() < mdistmax).astype(int)
    igood_obs_all = igood_obs_exp.flatten() * igood_obs_mdist
    igood_all = igood_exp * pidl_funcs.avoid_nans(igood_obs_mdist, sp2so_indices)
    # Photons per source, total and inside the source/background apertures:
    src_nph = nphv
    src_nph_src = np.zeros(n_sources)
    src_nph_bg = np.zeros(n_sources)
    for iv in np.arange(n_sources):
        if src_nph[iv] > 0:
            id0iv = id0[iv] + np.arange(src_nph[iv])
            src_nph_src[iv] = np.sum(i_srcph_srcaper[id0iv])
            src_nph_bg[iv] = np.sum(i_srcph_bgaper[id0iv])
    srcph_nph = pidl_funcs.avoid_nans(src_nph, sp2s_indices)
    srcph_nph_src = pidl_funcs.avoid_nans(src_nph_src, sp2s_indices)
    srcph_nph_bg = pidl_funcs.avoid_nans(src_nph_bg, sp2s_indices)
    # Compute the filter arrays.
    # Energy-band filters:
    i_srcph_softe = np.logical_and(energy > softe_new[0],
                                   energy <= softe_new[1])  # soft-band photons
    i_srcph_harde = np.logical_and(energy > harde_new[0],
                                   energy <= harde_new[1])  # hard-band photons
    # Valid-data filters, srcph-length:
    i_srcph_matched_srcobs = pidl_funcs.avoid_nans(~np.isnan(so2s_starts),
                                                   sp2s_indices)  # valid obsid
    i_srcph_valid_mdist = np.logical_and(mdist < mdistmax, mdist > 0.)
    i_srcph_good_obsid = ~bad.astype(bool)  # selects good obsids
    i_srcph_valid_lim = (srcph_nph_src >= lim[0])
    # Valid-data filters, source-length:
    i_src_matched_srcobs = (~np.isnan(so2s_starts))  # sources with source-obs matches
    i_src_good_obsid = i_srcph_good_obsid[id0]  # sources from non-bad obsids
    i_src_valid_lim = (src_nph_src >= lim[0])

    # Part 5: Exclude the sources on the exclude list.
    if exclude_list != "NA":
        # Exclude the photons around certain user-input sources from the
        # stack (updated to fix excision of .sav, RCH 7 Aug 2013):
        fits_file = stack_file.split('.')[0] + ".fits"
        inexclude = exclude_sources(exclude_list, fits_file, x, y,
                                    i_srcph_non_excluded)
        source_obs_exp = update_exposure(
            exclude_list, fits_file, source_ra[id0], source_dec[id0],
            exp_out["source_obs_ind"].flatten(),
            exp_out["source_obs_srcrad"].flatten(),
            np.zeros(len(exp_out["source_obs_srcrad"].flatten())))
        if use_bgh:
            srcobs_bgh = update_exposure(
                exclude_list, fits_file, source_ra[id0], source_dec[id0],
                exp_out["source_obs_ind"].flatten(),
                exp_out["source_obs_srcrad"].flatten(),
                np.zeros(len(exp_out["source_obs_srcrad"].flatten())))
        if use_bgs:
            srcobs_bgs = update_exposure(
                exclude_list, fits_file, source_ra[id0], source_dec[id0],
                exp_out["source_obs_ind"].flatten(),
                exp_out["source_obs_srcrad"].flatten(),
                np.zeros(len(exp_out["source_obs_srcrad"].flatten())))

    # Part 6: For-loop iterating over sources. Information stored for
    # multiple source-obs pairs is concatenated into a single value per
    # source; a histogram is used to optimize the per-source searches in
    # source_obs_ind.
    hist_obs_ind_max = np.max(exp_out["source_obs_ind"].flatten())
    hist_source_obs_ind, rev_src_ind, ed = pidl_funcs.hist_nd(
        exp_out["source_obs_ind"].flatten(), -0.5, hist_obs_ind_max + 0.5, 1)
    len_id0 = len(id0)
    for k in np.arange(len_id0):
        if (rev_src_ind[k] != rev_src_ind[k + 1]) and \
                (rev_src_ind[k] != len(rev_src_ind)):
            ind = rev_src_ind[rev_src_ind[k]:rev_src_ind[k + 1]]
            src_exp_sky[k] = np.sum(srcobs_exp_sky[ind].flatten() * igood_obs_all[ind])
            src_exp_det[k] = np.sum(srcobs_exp_det[ind].flatten() * igood_obs_all[ind])
            if use_bgs:
                src_bg_ctss[k] = np.sum(srcobs_bgs[ind])
            if use_bgh:
                src_bg_cths[k] = np.sum(srcobs_bgh[ind])
            if np.any((igood_obs_all[ind]).astype(bool)):
                indg = ind[(igood_obs_all[ind]).astype(bool)]
                # Take the median value of mdistv/r90v/srcradv in the case of
                # multiple observations:
                mdistv[k] = np.median(exp_out["source_obs_mdist"].flatten()[indg])
                r90v[k] = np.median(exp_out["source_obs_r90"].flatten()[indg])
                srcradv[k] = r90v[k] / 0.492
                indg_min = indg[(indg == np.min(indg))]
                obsidv[k] = srcobs_obsids[indg_min[0]]
                # Get the total area if detected in multiple exposures:
                srcareav[k] = np.sum(np.pi * (exp_out["source_obs_srcrad"].flatten()[indg] *
                                              r90_frac * igood_obs_all[indg] * 0.492) ** 2.)
                src_area_exp_sky[k] = np.sum(
                    srcobs_exp_sky[indg] * igood_obs_all[indg] * np.pi *
                    (exp_out["source_obs_srcrad"].flatten()[indg] * r90_frac * 0.492) ** 2.)
                src_area_exp_det[k] = np.sum(
                    srcobs_exp_det[indg] * igood_obs_all[indg] * np.pi *
                    (exp_out["source_obs_srcrad"].flatten()[indg] * r90_frac * 0.492) ** 2.)
    # (The comparison here was lost in extraction and is reconstructed to
    # mirror the srcph-length mdist filter above.)
    i_src_valid_mdist = np.logical_and(mdistv < mdistmax, mdistv > 0)
    iuse = i_srcph_matched_srcobs * i_srcph_valid_mdist * i_srcph_good_obsid * \
        i_srcph_valid_lim * i_srcph_non_excluded * igood_all
    i_src_valid = i_src_valid_mdist * i_src_good_obsid * i_src_valid_lim * \
        i_src_matched_srcobs
    with open(parameter_outfile, 'wb') as f:
        pickle.dump([i_srcph_softe, i_srcph_harde, i_srcph_srcaper,
                     i_srcph_bgaper, i_src_valid_lim, i_src_valid_mdist,
                     i_src_good_obsid, i_src_matched_srcobs, iuse,
                     i_src_valid, i_flux_from_soft, i_flux_from_hard,
                     src_nph_src, src_nph, use_bgs, use_bgh, src_bg_ctss,
                     src_bg_cths, src_area_exp_det, src_area_exp_sky,
                     src_exp_sky, src_exp_det, srcradv, mdistv, obsidv,
                     source_id, id0, idv, x, y, id_step], f)
    return


def update_exposure(exclude_list, stacked_evt, source_ra, source_dec,
                    source_obs_ind, source_obs_srcrad, source_obs_exp):
    exclude_info = ascii.read(exclude_list, names=["ra", "dec", "rad"])
    hdu = fits.open(stacked_evt)
    event_head = hdu[1].header
    exclude_x, exclude_y = pidl_funcs.ad2xy(event_head,
                                            exclude_info["ra"].data,
                                            exclude_info["dec"].data)
    source_x, source_y = pidl_funcs.ad2xy(event_head, source_ra, source_dec)
    # Replicate the source coordinates for the different observations:
    source_x = source_x[source_obs_ind]
    source_y = source_y[source_obs_ind]
    exclude_rad = exclude_info["rad"].data / 0.492
    bin_size = 2 * (np.max(exclude_rad) + np.max(source_obs_srcrad))
    hist_minx, hist_maxx = np.min(exclude_x) - bin_size / 2., \
        np.max(exclude_x) + bin_size / 2.
    hist_miny, hist_maxy = np.min(exclude_y) - bin_size / 2., \
        np.max(exclude_y) + bin_size / 2.
    hist_sources, rev_sources, ed = pidl_funcs.hist_nd(
        np.array([source_x, source_y]), [hist_minx, hist_miny],
        [hist_maxx, hist_maxy], [bin_size, bin_size])
    hist_size = np.array(hist_sources.shape)
    # Find the x and y distances of the excluded sources from the origin of
    # the histogram (in units of 1 bin_size):
    excl_dist_x = (exclude_x - hist_minx) / bin_size
    excl_dist_y = (exclude_y - hist_miny) / bin_size
    # The whole part of this scaled distance is the index of the current bin:
    ind_bin_x, ind_bin_y = np.floor(excl_dist_x), np.floor(excl_dist_y)
    # The fraction shows in which part of the bin we are; from it we compute
    # the indices of the other 3 bins of interest:
    bin_offset_x = np.sign(excl_dist_x - ind_bin_x - 0.5)
    bin_offset_y = np.sign(excl_dist_y - ind_bin_y - 0.5)
    # Expand to the adjacent bins:
    bins_x = np.concatenate((ind_bin_x, ind_bin_x + bin_offset_x)).astype(int)
    bins_y = np.concatenate((ind_bin_y, ind_bin_y + bin_offset_y)).astype(int)
    n_excl = len(exclude_x)
    for ind_excl in np.arange(n_excl):
        ind_sources = np.array([-1])
        # Get the bin index of the bin the current excluded source falls in:
        bin_ind = bins_y[ind_excl] * hist_size[0] + bins_x[ind_excl]
        # ... and the indices of the sources inside it:
        if (rev_sources[bin_ind + 1] - rev_sources[bin_ind]) != 0:
            ind_sources = np.concatenate((ind_sources,
                rev_sources[rev_sources[bin_ind]:rev_sources[bin_ind + 1]]))
        # NOTE: the adjacent-bin handling and the overlap setup below were
        # partly lost in extraction; they are reconstructed to mirror the
        # containing-bin case above and the surviving overlap formulas below.
        for bx, by in [(bins_x[n_excl + ind_excl], bins_y[ind_excl]),
                       (bins_x[ind_excl], bins_y[n_excl + ind_excl]),
                       (bins_x[n_excl + ind_excl], bins_y[n_excl + ind_excl])]:
            bin_ind = by * hist_size[0] + bx
            valid_bin = np.logical_and(bin_ind >= 0,
                                       bin_ind < len(rev_sources) - 1)
            if valid_bin and (rev_sources[bin_ind + 1] - rev_sources[bin_ind]) != 0:
                ind_sources = np.concatenate((ind_sources,
                    rev_sources[rev_sources[bin_ind]:rev_sources[bin_ind + 1]]))
        ind_sources = np.unique(ind_sources[ind_sources >= 0]).astype(int)
        if len(ind_sources) == 0:
            continue
        # Distance between the excluded source and each candidate source:
        d = np.sqrt((source_x[ind_sources] - exclude_x[ind_excl]) ** 2 +
                    (source_y[ind_sources] - exclude_y[ind_excl]) ** 2)
        # Candidate overlaps: centers closer than the sum of the radii:
        ind_olap = np.where(d < source_obs_srcrad[ind_sources] +
                            exclude_rad[ind_excl])[0]
        if len(ind_olap) == 0:
            continue
        d_olap = d[ind_olap]
        r12 = source_obs_srcrad[ind_sources[ind_olap]] ** 2  # aperture radius^2
        r22 = exclude_rad[ind_excl] ** 2                     # exclusion radius^2
        # Circle-circle overlap arguments (standard lens geometry):
        arg1 = (d_olap ** 2 + r12 - r22) / (2. * d_olap * np.sqrt(r12))
        arg2 = (d_olap ** 2 + r22 - r12) / (2. * d_olap * np.sqrt(r22))
        # Source aperture fully inside the exclusion circle:
        i1in2 = np.where(np.logical_and(arg1 < -1, arg2 > 1))[0]
        # Exclusion circle fully inside the source aperture:
        i2in1 = np.where(np.logical_and(arg1 > 1, arg2 < -1))[0]
        # Partially overlapping circles:
        ivenn = np.where(np.logical_and(np.abs(arg1) < 1, np.abs(arg2) < 1))[0]
        overlap = np.zeros(len(ind_olap))
        if len(i1in2) > 0:
            overlap[i1in2] = 1
        if len(i2in1) > 0:
            overlap[i2in1] = r22 / r12[i2in1]
        # The general case - partial overlap (lens area over aperture area):
        if len(ivenn) > 0:
            theta1 = 2. * np.arccos(arg1[ivenn])
            theta2 = 2. * np.arccos(arg2[ivenn])
            overlap[ivenn] = (0.5 * r22 * (theta2 - np.sin(theta2)) +
                              0.5 * r12[ivenn] * (theta1 - np.sin(theta1))) / \
                             (np.pi * r12[ivenn])
        # Scale down the exposure of the overlapped apertures:
        source_obs_exp[ind_sources[ind_olap]] = \
            source_obs_exp[ind_sources[ind_olap]] * (1. - overlap)
    return source_obs_exp


def exclude_sources(exclude_list, stacked_evt, x, y, inexclude):
    '''
    Zero out the entries of `inexclude` for photons that fall inside any of
    the user-supplied exclusion circles.
    (Notes carried over from the port: should valid_bin be np.any, and where
    is inexclude defined?)
    '''
    exclude_info = ascii.read(exclude_list, names=["ra", "dec", "rad"])
    hdu = fits.open(stacked_evt)
    event_head = hdu[1].header
    exclude_x, exclude_y = pidl_funcs.ad2xy(event_head,
                                            exclude_info["ra"].data,
                                            exclude_info["dec"].data)
    exclude_rad = exclude_info["rad"].data / 0.492
    bin_size = 2. * np.max(exclude_rad)
    hist_minx, hist_maxx = np.min(exclude_x) - bin_size / 2., \
        np.max(exclude_x) + bin_size / 2.
    hist_miny, hist_maxy = np.min(exclude_y) - bin_size / 2., \
        np.max(exclude_y) + bin_size / 2.
    hist_photons, rev_photons, ed = pidl_funcs.hist_nd(
        np.array([x, y]), [hist_minx, hist_miny], [hist_maxx, hist_maxy],
        [bin_size, bin_size])
    hist_size = np.array(hist_photons.shape)
    # Find the x and y distances of the excluded sources from the origin of
    # the histogram (in units of 1 bin_size):
    excl_dist_x = (exclude_x - hist_minx) / bin_size
    excl_dist_y = (exclude_y - hist_miny) / bin_size
    # The whole part of this scaled distance is the index of the current bin:
    ind_bin_x, ind_bin_y = np.floor(excl_dist_x), np.floor(excl_dist_y)
    # The fraction shows in which part of the bin we are; from it we compute
    # the indices of the other 3 bins of interest:
    bin_offset_x = np.sign(excl_dist_x - ind_bin_x - 0.5)
    bin_offset_y = np.sign(excl_dist_y - ind_bin_y - 0.5)
    # Expand to the adjacent bins:
    bins_x = np.concatenate((ind_bin_x, ind_bin_x + bin_offset_x)).astype(int)
    bins_y = np.concatenate((ind_bin_y, ind_bin_y + bin_offset_y)).astype(int)
    n_excl = len(exclude_x)
    for ind_excl in np.arange(n_excl):
        ind_photons = np.array([-1])
        # Get the bin index of the bin the current excluded source falls in:
        bin_ind = bins_y[ind_excl] * hist_size[0] + bins_x[ind_excl]
        # ... and the indices of the photons inside it:
        if (rev_photons[bin_ind + 1] - rev_photons[bin_ind]) != 0:
            ind_photons = np.concatenate((ind_photons,
                rev_photons[rev_photons[bin_ind]:rev_photons[bin_ind + 1]]))
        # NOTE: as in update_exposure, the adjacent-bin handling and the
        # final exclusion test were lost in extraction; reconstructed to
        # mirror the containing-bin case and the function's stated purpose.
        for bx, by in [(bins_x[n_excl + ind_excl], bins_y[ind_excl]),
                       (bins_x[ind_excl], bins_y[n_excl + ind_excl]),
                       (bins_x[n_excl + ind_excl], bins_y[n_excl + ind_excl])]:
            bin_ind = by * hist_size[0] + bx
            valid_bin = np.logical_and(bin_ind >= 0,
                                       bin_ind < len(rev_photons) - 1)
            if valid_bin and (rev_photons[bin_ind + 1] - rev_photons[bin_ind]) != 0:
                ind_photons = np.concatenate((ind_photons,
                    rev_photons[rev_photons[bin_ind]:rev_photons[bin_ind + 1]]))
        ind_photons = np.unique(ind_photons[ind_photons >= 0]).astype(int)
        if len(ind_photons) == 0:
            continue
        # Photons within the exclusion radius are flagged as excluded:
        d = np.sqrt((x[ind_photons] - exclude_x[ind_excl]) ** 2 +
                    (y[ind_photons] - exclude_y[ind_excl]) ** 2)
        inexclude[ind_photons[d < exclude_rad[ind_excl]]] = 0
    return inexclude


def sf_gen_indarrays(np_array, supers):
    '''
    Generate the index arrays used to communicate data between source-length,
    srcobs-length, and srcph-length arrays: the sorted input, the start index
    and multiplicity of each unique value, and an index array mapping each
    element back into `supers`.
    NOTE: the opening lines of this function were lost in extraction; the
    preamble below is a reconstruction based on how its outputs are used.
    '''
    sorted_ar = np.sort(np_array)
    uniq_array, starts, nums = np.unique(sorted_ar, return_index=True,
                                         return_counts=True)
    if len(supers) > 0:
        sub_a, sub_b = pidl_funcs.match2_quick(uniq_array, supers)
        matches = np.where(~np.isnan(sub_b))[0]
        if len(matches) > 0:
            starts_old = copy.deepcopy(starts)
            starts = copy.deepcopy(sub_b)
            starts[matches] = starts_old[(sub_b[matches]).astype(int)]
            nums_old = copy.deepcopy(nums)
            nums = copy.deepcopy(sub_b)
            nums[matches] = nums_old[(sub_b[matches]).astype(int)]
            h, ri, ed = pidl_funcs.hist_nd(np.cumsum(nums_old) - 1, 0,
                                           np.max(np.cumsum(nums_old)), 1)
            indices = (sub_a[ri[0:len(h)] - ri[0]])[np.argsort(sorted_ar)]
            return sorted_ar, starts, nums, indices.astype(int)
        else:
            return sorted_ar, starts, nums, np.array([])


def sf_perform_background(mdistmax, softe, harde, r90_frac):
    """
    Set up multiprocessing for the background analysis.
    """
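    # Outline of the method implemented by the workers below: for each
    # source, photons with dist between roughly 1.2-1.3 times the source
    # radius and boxsize/2 form a background annulus; the annulus is split
    # into twelve 30-degree wedges in theta, wedges with counts above
    # median + 2*stddev (and above 2) are dropped as contaminated by real
    # sources, and bkg_frac records the surviving fraction of the annulus.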
    PROC_NUM = 4
    multiPROC_NUM = 300
    global source_id, ph_obsid, id0, mdist, mdistmaxi, srcrad, dist, boxsize, \
        i_srcph_bgaper, loenergy, hienergy, nsrcs, theta, idv, energy, \
        use_subset, i_flux_from_soft, i_flux_from_hard
    if use_subset:
        save_names = prepare_file_name.split("/")[-1]
        save_names = save_names.split(".")
        with open(subs_namey + save_names[0] + "_subset." + save_names[1], 'rb') as f:
            stack_file, boxsize, source_id, ra, dec, x, y, old_x, old_y, \
                energy, source_ra, source_dec, mdist, r90, badid, bad, dist, \
                id0, idv, nphv, ph_obsid, i_flux_from_soft, \
                i_flux_from_hard = pickle.load(f)
    else:
        with open(prepare_file_name, 'rb') as f:
            stack_file, boxsize, source_id, ra, dec, x, y, old_x, old_y, \
                energy, source_ra, source_dec, mdist, r90, badid, bad, dist, \
                id0, idv, nphv, ph_obsid, i_flux_from_soft, \
                i_flux_from_hard = pickle.load(f)
    mdistmaxi = mdistmax
    # id0 is a concatenation of sources that are and are not within the
    # obsids, so objects not within the obsids must be ignored.
    nsrcs = len(idv)
    # Index arrays corresponding to the energy ranges:
    loenergy = np.logical_and(energy >= softe[0], energy <= softe[1])
    hienergy = np.logical_and(energy > harde[0], energy <= harde[1])
    print('Beginning background analysis')
    # Simple apertures to begin with:
    srcrad_in = r90 * r90_frac / 0.492
    srcrad = np.clip(srcrad_in, 3., None)
    i_srcph_srcaper = (dist <= srcrad)
    # Now analyze the 'background' region as a function of theta from the
    # source position:
    theta = 180. + (180. / np.pi * np.arctan2(x, y))  # degrees, 0. to 360.
    # Initialize the outputs:
    ti = time.time()
    with closing(Pool(processes=PROC_NUM)) as pool:
        resu = pool.map(sf_perform_background_multiprocess_matrix_subset,
                        np.arange(nsrcs))
        resu = np.array(resu)
        i_srcph_bgaper = np.concatenate(resu[:, 0])
        srcph_bgfrac = np.array(list(resu[:, 1]))
        bg_in_srcaper_soft = np.array(list(resu[:, 2]))
        bg_in_srcaper_hard = np.array(list(resu[:, 3]))
        bg_in_srcaper_soft_flux = np.array(list(resu[:, 4]))
        bg_in_srcaper_hard_flux = np.array(list(resu[:, 5]))
        pool.terminate()
    print("time taken to calculate background", time.time() - ti)
    if use_subset:
        save_names = background_save_file.split("/")[-1]
        save_names = save_names.split(".")
        with open(subs_namey + save_names[0] + "_subset." + save_names[1], 'wb') as f:
            pickle.dump([i_srcph_bgaper.astype(bool), srcph_bgfrac,
                         bg_in_srcaper_soft, bg_in_srcaper_hard,
                         bg_in_srcaper_soft_flux, bg_in_srcaper_hard_flux], f)
    else:
        with open(background_save_file, 'wb') as f:
            pickle.dump([i_srcph_bgaper.astype(bool), srcph_bgfrac,
                         bg_in_srcaper_soft, bg_in_srcaper_hard,
                         bg_in_srcaper_soft_flux, bg_in_srcaper_hard_flux], f)
    return i_srcph_bgaper, srcph_bgfrac, bg_in_srcaper_soft, \
        bg_in_srcaper_hard, bg_in_srcaper_soft_flux, bg_in_srcaper_hard_flux


# The matrix variant below vectorizes the theta binning; the large matrix can
# take too long for big photon lists.
def sf_perform_background_multiprocess_matrix(n):
    # Photons belonging to source n, per obsid:
    phots_src = np.zeros(len(ph_obsid))
    # NOTE (reconstructed): the per-photon background-annulus mask; the
    # original initialization was lost in extraction and is restored here to
    # mirror sf_perform_background_no_multiprocess below.
    i_srcph_bgaper = np.logical_and(dist > (srcrad * 1.2), dist <= boxsize / 2.)
    if n < (nsrcs - 1):
        phots_src[id0[n]:(id0[n + 1])] = 1
        obsid_src = ph_obsid[id0[n]:(id0[n + 1])]
    else:
        phots_src[id0[n]:] = 1
        obsid_src = ph_obsid[id0[n]:]
    bkg_frac, bg_in_srcaper_soft, bg_in_srcaper_hard = 1., 0, 0
    t11 = time.time()
    obsid_src_uniq = np.unique(obsid_src)
    for m in np.arange(len(obsid_src_uniq)):
        # Find all the photons within this obsid:
        foo_obsid = (ph_obsid == obsid_src_uniq[m])
        phots_src_obsid = phots_src * (foo_obsid).astype(int)
        obsid_mdist = mdist[np.where((phots_src_obsid).astype(bool))[0]]
        obsid_mdist = obsid_mdist[0]
        # NOTE (reconstructed): the mdist cut and the per-obsid radius were
        # lost in extraction; restored from the surviving parallel versions.
        if obsid_mdist >= mdistmaxi:
            continue
        obsid_src_rad = np.clip(sf_utils.calculate_r90(obsid_mdist) / 0.492,
                                3., None)
        i_bg_phots_src_obsid = np.logical_and(dist > (obsid_src_rad * 1.3),
                                              dist <= boxsize / 2.)
def sf_perform_background_multiprocess_matrix(n):
    '''Vectorized (matrix) variant of the per-source background worker.'''
    #assumed initializations (lost in source): per-photon membership flags
    #and the default background aperture, as in the serial version below
    phots_src=np.zeros(len(ph_obsid))
    i_srcph_bgaper=np.logical_and(dist>(srcrad*1.2),dist<=boxsize/2.).astype(int)
    #photons in a source, per obsid
    if n<(nsrcs-1):
        phots_src[id0[n]:(id0[n+1])]=1
        obsid_src=ph_obsid[id0[n]:(id0[n+1])]
    else:
        phots_src[id0[n]:]=1
        obsid_src=ph_obsid[id0[n]:]
    bkg_frac,bg_in_srcaper_soft,bg_in_srcaper_hard=1.,0,0
    obsid_src_uniq=np.unique(obsid_src)
    for m in np.arange(len(obsid_src_uniq)):
        #find all the photons within this obsid:
        foo_obsid=(ph_obsid==obsid_src_uniq[m])
        phots_src_obsid=phots_src*(foo_obsid).astype(int)
        obsid_mdist=mdist[np.where((phots_src_obsid).astype(bool))[0]]
        obsid_mdist=obsid_mdist[0]
        if obsid_mdist<mdistmaxi: #reconstructed comparison (text lost)
            #assumed, by analogy with obsid_mdist above (text lost):
            obsid_src_rad=srcrad[np.where((phots_src_obsid).astype(bool))[0]][0]
            i_bg_phots_src_obsid=np.logical_and(dist>(obsid_src_rad*1.3),dist<=boxsize/2.)
            bgphots_src_obsid=phots_src_obsid*i_bg_phots_src_obsid #background photons for this obsid
            theta_to_bin_map=np.digitize(theta,np.arange(0,361,30))-1
            theta_to_bin_map=np.tile(theta_to_bin_map,12).reshape(12,len(theta_to_bin_map)).T==np.arange(12)
            separate_bins=np.full(len(theta),1)*theta_to_bin_map.T
            ireg=np.sum(((bgphots_src_obsid).astype(bool)*separate_bins).astype(int),axis=1)
            ibad_bg=np.logical_and(ireg>(np.median(ireg)+(2.*np.std(ireg))),ireg>2)
            ibad_bg=np.where(ibad_bg)[0]
            if len(ibad_bg)>0:
                #transpose added: rows must index the 12 theta bins
                thet_condition=np.sum(theta_to_bin_map.T[ibad_bg],axis=0).astype(bool)
                i_srcph_bgaper[np.where(thet_condition)]=0
                bkg_frac=(12.-len(ibad_bg))/12.
            theta_to_bin_map=0
            src_area=np.pi*obsid_src_rad**2
            bkg_area=np.pi*((boxsize/2.)**2-(obsid_src_rad*1.3)**2)
            bg_in_srcaper_soft+=np.sum(bgphots_src_obsid*loenergy*i_srcph_bgaper)*src_area/(bkg_area/bkg_frac)
            bg_in_srcaper_hard+=np.sum(bgphots_src_obsid*hienergy*i_srcph_bgaper)*src_area/(bkg_area/bkg_frac)
    return i_srcph_bgaper,bkg_frac,bg_in_srcaper_soft,bg_in_srcaper_hard, n
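#Sketch of the vectorized angular binning used in the matrix worker: count
#background photons in twelve 30-degree theta wedges without an explicit
#loop. 'theta_deg' and 'is_bg' are hypothetical stand-ins for the
#module-level theta array and background-photon mask.
def _demo_theta_wedge_counts(theta_deg, is_bg):
    wedge = np.digitize(theta_deg % 360.0, np.arange(0, 361, 30)) - 1  #0..11
    return np.bincount(wedge[is_bg.astype(bool)], minlength=12)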
def sf_perform_background_multiprocess(n):
    '''Loop-based per-source background worker.'''
    #assumed initializations (lost in source), as in the serial version below
    phots_src=np.zeros(len(ph_obsid))
    i_srcph_bgaper=np.logical_and(dist>(srcrad*1.2),dist<=boxsize/2.).astype(int)
    #photons in a source, per obsid
    if n<(nsrcs-1):
        phots_src[id0[n]:(id0[n+1])]=1
        obsid_src=ph_obsid[id0[n]:(id0[n+1])]
    else:
        phots_src[id0[n]:]=1
        obsid_src=ph_obsid[id0[n]:]
    bkg_frac,bg_in_srcaper_soft,bg_in_srcaper_hard=1.,0,0
    obsid_src_uniq=np.unique(obsid_src)
    for m in np.arange(len(obsid_src_uniq)):
        #find all the photons within this obsid:
        foo_obsid=(ph_obsid==obsid_src_uniq[m])
        phots_src_obsid=phots_src*(foo_obsid).astype(int)
        obsid_mdist=mdist[np.where((phots_src_obsid).astype(bool))[0]]
        obsid_mdist=obsid_mdist[0]
        if obsid_mdist<mdistmaxi: #reconstructed comparison (text lost)
            #assumed, by analogy with obsid_mdist above (text lost):
            obsid_src_rad=srcrad[np.where((phots_src_obsid).astype(bool))[0]][0]
            i_bg_phots_src_obsid=np.logical_and(dist>(obsid_src_rad*1.3),dist<=boxsize/2.)
            bgphots_src_obsid=phots_src_obsid*i_bg_phots_src_obsid #background photons for this obsid
            ireg=np.zeros(12) #there are 360/30 background regions
            for itheta in np.arange(12):
                thet_condition=np.logical_and(theta>=itheta*30,theta<(itheta+1)*30)
                ireg[itheta]=np.sum(np.logical_and(thet_condition,(bgphots_src_obsid).astype(bool)).astype(int))
            #A region in 2pi with strangely high 'background'; this should
            #remove bright sources lurking in the background
            ibad_bg=np.logical_and(ireg>(np.median(ireg)+(2.*np.std(ireg))),ireg>2)
            ibad_bg=np.where(ibad_bg)[0]
            if len(ibad_bg)>0:
                for k in np.arange(len(ibad_bg)):
                    thet_condition=np.logical_and(theta>=ibad_bg[k]*30,theta<(ibad_bg[k]+1)*30)
                    bad_phots=np.logical_and(thet_condition,(bgphots_src_obsid).astype(bool))
                    i_srcph_bgaper[np.where(bad_phots)[0]]=0
                thet_condition=0
                bkg_frac=(12.-len(ibad_bg))/12.
            src_area=np.pi*obsid_src_rad**2
            bkg_area=np.pi*((boxsize/2.)**2-(obsid_src_rad*1.3)**2)
            bg_in_srcaper_soft+=np.sum(bgphots_src_obsid*loenergy*i_srcph_bgaper)*src_area/(bkg_area/bkg_frac)
            bg_in_srcaper_hard+=np.sum(bgphots_src_obsid*hienergy*i_srcph_bgaper)*src_area/(bkg_area/bkg_frac)
    return i_srcph_bgaper,bkg_frac,bg_in_srcaper_soft,bg_in_srcaper_hard, n
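#Sketch of the aperture/annulus scaling used by all three background
#routines: background counts collected in an annulus are rescaled to the
#source-aperture area, and divided by the usable azimuthal fraction when
#wedges were discarded. All argument names are hypothetical.
def _demo_bg_in_aperture(n_bg_counts, src_rad, box_half, bkg_frac=1.0):
    src_area = np.pi * src_rad**2
    bkg_area = np.pi * (box_half**2 - (src_rad * 1.3)**2)  #annulus: 1.3*r to box edge
    return n_bg_counts * src_area / (bkg_area / bkg_frac)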
def sf_perform_background_no_multiprocess(mdistmax,softe,harde,r90_frac):
    '''Serial reference implementation of the background analysis.'''
    with open(prepare_file_name, 'rb') as f:
        stack_file,boxsize,source_id,ra,dec,x,y,old_x,old_y,\
        energy,source_ra,source_dec,mdist,r90,badid,bad,dist,\
        id0,idv,nphv,ph_obsid = pickle.load(f)
    hdul=fits.open(exp_file)
    exp_out=hdul[1].data
    hdul=0
    sources_file=ascii.read(sources_file_name)
    '''
    id0 is a concat of sources that are within and not within the obsids,
    so we need to ignore objects that are not within the obsids
    '''
    nsrcs = np.argmax(idv) + 1
    #Define index arrays corresponding to the energy bands
    loenergy=np.logical_and(energy>=softe[0],energy<=softe[1])
    hienergy=np.logical_and(energy>harde[0], energy<=harde[1])
    print('Beginning background analysis')
    #Simple apertures to begin with
    srcrad_in = r90*r90_frac/0.492
    srcrad=np.clip(srcrad_in,3.,None)
    i_srcph_bgaper = np.logical_and(dist>(srcrad*1.2), dist<=boxsize/2.)
    i_srcph_srcaper = (dist<=srcrad)
    '''
    Now analyze the 'background' region as a function of theta from the
    source position.
    '''
    theta=180.+(180./np.pi*np.arctan2(x,y)) # degrees from 0. to 360.
    bkg_frac=np.zeros(nsrcs)+1. # all backgrounds are set to 100% initially
    #Initialize the outputs
    bg_in_srcaper_soft = np.zeros(nsrcs)
    bg_in_srcaper_hard = np.zeros(nsrcs)
    ti=time.time()
    for n in np.arange(nsrcs):
        phots_src=np.zeros(len(i_srcph_bgaper))
        #photons in a source, per obsid
        if n<(nsrcs-1):
            phots_src[id0[n]:(id0[n+1])]=1
            obsid_src=ph_obsid[id0[n]:(id0[n+1])]
        else:
            phots_src[id0[n]:]=1
            obsid_src=ph_obsid[id0[n]:]
        obsid_src_uniq=np.unique(obsid_src)
        for m in np.arange(len(obsid_src_uniq)):
            #find all the photons within this obsid:
            foo_obsid=(ph_obsid==obsid_src_uniq[m])
            phots_src_obsid=phots_src*(foo_obsid).astype(int)
            obsid_mdist=mdist[np.where((phots_src_obsid).astype(bool))[0]]
            obsid_mdist=obsid_mdist[0]
            if obsid_mdist<mdistmax: #reconstructed comparison (text lost)
                #assumed, by analogy with obsid_mdist above (text lost):
                obsid_src_rad=srcrad[np.where((phots_src_obsid).astype(bool))[0]][0]
                i_bg_phots_src_obsid=np.logical_and(dist>(obsid_src_rad*1.3),dist<=boxsize/2.)
                bgphots_src_obsid=phots_src_obsid*i_bg_phots_src_obsid #background photons for this obsid
                ireg=np.zeros(12) #there are 360/30 background regions
                for itheta in np.arange(12):
                    thet_condition=np.logical_and(theta>=itheta*30,theta<(itheta+1)*30)
                    ireg[itheta]=np.sum(np.logical_and(thet_condition,(bgphots_src_obsid).astype(bool)).astype(int))
                '''A region in 2pi with strangely high 'background'; this should
                remove bright sources lurking in the background'''
                ibad_bg=np.logical_and(ireg>(np.median(ireg)+(2.*np.std(ireg))),ireg>2)
                ibad_bg=np.where(ibad_bg)[0]
                if len(ibad_bg)>0:
                    for k in np.arange(len(ibad_bg)):
                        thet_condition=np.logical_and(theta>=ibad_bg[k]*30,theta<(ibad_bg[k]+1)*30)
                        bad_phots=np.logical_and(thet_condition,(bgphots_src_obsid).astype(bool))
                        i_srcph_bgaper[np.where(bad_phots)[0]]=0
                    bkg_frac[n]=(12.-len(ibad_bg))/12.
                src_area=np.pi*obsid_src_rad**2
                bkg_area=np.pi*((boxsize/2.)**2-(obsid_src_rad*1.3)**2)
                bg_in_srcaper_soft[n]+=np.sum(bgphots_src_obsid*loenergy*i_srcph_bgaper)*src_area/(bkg_area/bkg_frac[n])
                bg_in_srcaper_hard[n]+=np.sum(bgphots_src_obsid*hienergy*i_srcph_bgaper)*src_area/(bkg_area/bkg_frac[n])
    print("time taken to calculate background",time.time()-ti)
    srcph_bgfrac = bkg_frac
    with open(background_save_file, 'wb') as f:
        pickle.dump([i_srcph_bgaper,srcph_bgfrac,bg_in_srcaper_soft,bg_in_srcaper_hard],f)
    return i_srcph_bgaper,srcph_bgfrac,bg_in_srcaper_soft,bg_in_srcaper_hard

def experimental_sf_create_src_imgs(x,y,energy,source_id,idv,id0,source_obs_ind,
                                    source_obs_obs,sources_id,sources_ra,
                                    sources_dec,boxsize):
    ti=time.time()
    srcobs_srcids=idv[source_obs_ind]
    #make the image directory if it doesn't exist:
    if not glob.glob(image_dir):
        os.mkdir(image_dir)
    #and the directory for the exposure maps
    if not glob.glob(exp_dir):
        os.mkdir(exp_dir)
    '''
    id0 is a concat of sources that are within and not within the obsids,
    so we need to ignore objects that are not within the obsids
    '''
    global source_obs_obss,srcobs_srcidss,source_ids,expdirs,sources_ids,\
           sources_ras,sources_decs,boxsizes,idvs,id0s,obsid_uniqs,xs,ys
    obsid_uniq = np.unique(source_obs_obs)
    global nsrcs
    nsrcs = np.argmax(idv)
    source_obs_obss,srcobs_srcidss,expdirs=source_obs_obs,srcobs_srcids,exp_dir
    sources_ids,sources_ras,sources_decs,boxsizes=sources_id,sources_ra,sources_dec,boxsize
    obsid_uniqs,id0s,source_ids,xs,ys,idvs=obsid_uniq,id0,source_id,x,y,idv
    nobsid = len(obsid_uniq)
    print('Creating individual source images')
    with closing(Pool(processes=4)) as pool:
        resu=pool.map(create_source_images,np.arange(nsrcs+1))
        resu1=pool.map(exp_image_maker,np.arange(int(nobsid)))
        pool.terminate()
    print("All cutouts completed in", time.time()-ti)
    return

def create_source_images(n):
    srcid = source_ids[id0s[n]]
    isrc=np.where(sources_ids==srcid)[0]
    if n+1 < nsrcs:
        phots_x=xs[id0s[n]:(id0s[n+1])]
        phots_y=ys[id0s[n]:(id0s[n+1])]
    else:
        phots_x=xs[id0s[n]:]
        phots_y=ys[id0s[n]:]
    #boxsizes is the global alias set by the parent routine
    img,rev_img,edgy=pidl_funcs.hist_nd(np.array([phots_x,phots_y]),
                                        (-1.*boxsizes/2.,-1.*boxsizes/2.),
                                        (boxsizes/2.,boxsizes/2.),(1,1))
    #Use the source id and id0 to name the file
    hdu = fits.PrimaryHDU(img)
    hdu.writeto(image_dir+'/stack_'+str(srcid)+'_'+format(isrc[0],'06')+'_'+format(id0s[n],'06')+'.fits',overwrite=True)
    return
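#Sketch of the cutout imaging in create_source_images using plain numpy
#instead of pidl_funcs.hist_nd: bin photon offsets into a square image
#centered on the source. 'half' is a hypothetical half-box size in pixels.
def _demo_photon_image(dx, dy, half):
    edges = np.arange(-half, half + 1, 1.0)
    img, _, _ = np.histogram2d(dx, dy, bins=[edges, edges])
    return img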
def exp_image_maker(n):
    iobs=np.where(source_obs_obss==obsid_uniqs[n])[0]
    if len(iobs)>0:
        #Find the src IDs in the OBSID
        srcs_in_obsid = srcobs_srcidss[iobs]
        nsrcs_in_obsid = len(srcs_in_obsid)
        #Find the exposure map and open it
        get_link=np.where(DIR_OBSID_LIST==obsid_uniqs[n])[0][0]
        search_name=LINK_LIST[get_link]
        hdul = fits.open(search_name+'/analysis/057.exp.bin1.fits.gz')
        hdr=hdul[0].header
        expmap=hdul[0].data
        if np.max(expmap)>0:
            for kk in np.arange(nsrcs_in_obsid):
                isrc = np.where(sources_ids==srcs_in_obsid[kk])[0]
                rasrc = sources_ras[isrc]
                decsrc = sources_decs[isrc]
                #Convert SRC RA,DEC to X,Y in the EXP MAP
                xsrc,ysrc=pidl_funcs.ad2xy(hdr,rasrc,decsrc)
                if xsrc[0]<0:
                    break
                #Extract a subimage:
                subimg,subhdr=pidl_funcs.hextract(expmap, hdr,
                                                  [xsrc[0]-(boxsizes/2.), xsrc[0]+(boxsizes/2.)],
                                                  [ysrc[0]-(boxsizes/2.), ysrc[0]+(boxsizes/2.)])
                #save the image:
                hdu_new=fits.PrimaryHDU(subimg,subhdr)
                hdu_new.writeto(expdirs+'/exp_'+str(srcs_in_obsid[kk])+'_'+format(isrc[0],'06')+'_'+format(obsid_uniqs[n],'06')+'.fits',
                                overwrite=True)
        else:
            print('Bad Exposure Map in Obsid: '+str(obsid_uniqs[n]))
            dummy=np.zeros(int(boxsizes)*int(boxsizes)).reshape(int(boxsizes),int(boxsizes))
            hdu_new=fits.PrimaryHDU(dummy)
            for kk in np.arange(nsrcs_in_obsid):
                isrc = np.where(sources_ids==srcs_in_obsid[kk])[0]
                hdu_new.writeto(expdirs+'/exp_'+str(srcs_in_obsid[kk])+'_'+format(isrc[0],'06')+'_'+format(obsid_uniqs[n],'06')+'.fits',
                                overwrite=True)
    else:
        print("No source related to Obsid "+str(obsid_uniqs[n]))
    return
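#Sketch of a bounds-safe subimage extraction, the role pidl_funcs.hextract
#plays in exp_image_maker; the clipping logic here is an assumption, not
#the actual hextract behavior.
def _demo_subimage(img, xc, yc, half):
    x0, x1 = max(int(xc - half), 0), min(int(xc + half), img.shape[1])
    y0, y1 = max(int(yc - half), 0), min(int(yc + half), img.shape[0])
    return img[y0:y1, x0:x1]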
#do stack
def sf_newstack_id(idlist,ind_cts=True):
    global boxsize
    boxsize=float(outp_boxsize.value)
    #reload all the variables from the parameter step as a pickle
    with open(parameter_outfile, 'rb') as f:
        i_srcph_softe, i_srcph_harde, i_srcph_srcaper, i_srcph_bgaper,\
        i_src_valid_lim, i_src_valid_mdist, i_src_good_obsid,\
        i_src_matched_srcobs, iuse, i_src_valid, i_flux_from_soft, i_flux_from_hard,\
        src_nph_src, src_nph,\
        use_bgs, use_bgh, src_bg_ctss, src_bg_cths, src_area_exp_det,\
        src_area_exp_sky, src_exp_sky, src_exp_det, srcradv, mdistv,\
        obsidv, source_id, id0, idv, x,y, id_step = pickle.load(f)
    with open(background_save_file, 'rb') as f:
        i_srcph_bgaper,srcph_bgfrac,bg_in_srcaper_soft,bg_in_srcaper_hard,\
        bg_in_srcaper_soft_flux,bg_in_srcaper_hard_flux=pickle.load(f)
    '''
    This part is done in sf_perform_background, and the information is
    contained in i_srcph_bgaper and i_srcph_srcaper.
    For each photon:
        Identify its energy bin
        Identify whether it is within r90 AND
        Identify if it is on the outskirts of the region (> 1.2 r90 and
        within the stacked box)
    '''
    '''
    This part is done by sf_new_parameters.
    For each source/obsid combination:
        If it is in a good obsid
        If the exposure time is longer than the minimum
        If the source is within the maximum off-axis angle that we set
        Whether it is "valid" by some definition
    '''
    '''
    For each source/obsid combination:
        Use the background and/or exposure maps to determine:
            Exposure time (done in srcobs_exp_det_var, srcobs_exp_sky_var)
            Average background level ('source_obs_bgh', 'source_obs_bgs')
    '''
    '''
    What we need to do here:
    For each source:
        Add and/or average all of this together:
            Total source counts in each energy band
            Average exposure time
            The average background rate in each energy band
    For each photon:
        Apply booleans on the levels of all the photons so we can chop and
        change the individual photons.
        Create the arrays to calculate where bright sources are.
    '''
    totbg=bg_in_srcaper_soft+bg_in_srcaper_hard
    totsrc=src_nph_src
    i_bright_src=(totsrc<170000) #WHY 170000
    i_ph_bright_src=np.zeros(len(source_id))
    for n in np.arange(len(id0)):
        #assumed (text lost in source): propagate the per-source bright flag
        #to that source's photons
        if n+1<len(id0):
            i_ph_bright_src[id0[n]:id0[n+1]]=i_bright_src[n]
        else:
            i_ph_bright_src[id0[n]:]=i_bright_src[n]
    if len(idv)>1:
        id_step=np.diff(idv)
        # id_step is not calculated correctly when a 0-event source list is encountered
        id_step=np.min(id_step[np.where(id_step>0.)[0]])
    else:
        id_step=1
    #assumed (setup lost in source): per-photon IDs and the whole/fractional
    #splits used by the histogram matching below
    id=source_id
    id_whole=np.floor(id)
    id_frac=id-id_whole
    idv_whole=np.floor(idv)
    idv_frac=idv-idv_whole
    list_whole=np.floor(np.array(idlist))
    list_frac=np.array(idlist)-list_whole
    idv_whole_vals=np.unique(idv_whole)
    ind_inv=np.full(len(idv),-1.)
    ind_match=np.full(len(idlist),-1.)
    matches=np.array([])
    for k in np.arange(len(idv_whole_vals)):
        indv_whole=np.where(idv_whole==idv_whole_vals[k])[0]
        #this is just a very convoluted way to get np.arange(len(inlist)):
        ind_whole=np.where(id_whole==idv_whole_vals[k])[0]
        ind_list_whole=np.where(list_whole==idv_whole_vals[k])[0]
        idv_curr_frac = idv_frac[indv_whole]
        id_curr_frac = id_frac[ind_whole]
        list_curr_frac = list_frac[ind_list_whole]
        hist_bin = id_step
        histmin = np.min(list_curr_frac) - (hist_bin/2.)
        histmax = np.max(list_curr_frac) + (hist_bin/2.)
        '''For the actual matching, create histograms with equal sizes for
        both lists'''
        hist_id,rev_id,ed = pidl_funcs.hist_nd(id_curr_frac, histmin, histmax, hist_bin)
        hist_idv,rev_idv,ed = pidl_funcs.hist_nd(idv_curr_frac, histmin, histmax, hist_bin)
        hist_idlist,rev_idlist,ed = pidl_funcs.hist_nd(list_curr_frac, histmin, histmax, hist_bin)
        #get the indices of the bins in the histogram where we have a match
        index_r = np.where(np.logical_and((hist_id!=0), (hist_idlist!=0)))[0]
        index_rv = np.where(np.logical_and((hist_idv!=0), (hist_idlist!=0)))[0]
        '''convert from indices of the bins to indices of the lists
        (see the help for IDL's histogram with /reverse_indices)'''
        bin_start = rev_id[index_r]
        bin_end = rev_id[index_r + 1]
        bin_size = bin_end-bin_start
        match_indices=np.zeros(np.sum(bin_size))
        bin_start_idlist = rev_idlist[index_r]
        index_match = 0
        for ind in np.arange(len(bin_start)):
            if bin_size[ind]<=0:
                continue
            match_indices[index_match:index_match+bin_size[ind]] = bin_start[ind] + np.arange(bin_size[ind])
            index_match += bin_size[ind]
        #stores the indices in id where there is a match
        matches = rev_id[(match_indices).astype(int)]
        #do the same for distinct sources
        bin_startv = rev_idv[index_rv]
        bin_endv = rev_idv[index_rv+1]
        bin_sizev = bin_endv - bin_startv
        match_indicesv = np.zeros(np.sum(bin_sizev))
        index_matchv = 0
        bin_startlist = rev_idlist[index_rv]
        bin_endlist = rev_idlist[index_rv+1]
        for indv in np.arange(len(bin_startv)):
            if bin_sizev[indv]==0:
                continue
            if bin_sizev[indv]>1:
                print('Duplicate ID match found, stopping')
            match_indicesv[index_matchv] = bin_startv[indv]
            if ind_cts:
                ind_inv[indv_whole[rev_idv[(match_indicesv[index_matchv]).astype(int)]]]=\
                    ind_list_whole[rev_idlist[bin_startlist[indv]]]
                ind_match[ind_list_whole[rev_idlist[bin_startlist[indv]]]]=\
                    indv_whole[rev_idv[(match_indicesv[index_matchv]).astype(int)]]
            index_matchv+=bin_sizev[indv]
        #stores the indices in idv where there is a match
        matchesv = rev_idv[(match_indicesv).astype(int)]
    ctss,ctsb,cths,cthb=0.,0.,0.,0.
    #assumed (the original guard here was lost in the source):
    if len(matches)>0:
        #ctss stands for counts in the soft energy band in the source aperture
        ctss = np.sum(i_srcph_softe * i_srcph_srcaper * iuse)
        #counts in the soft energy band in the background aperture
        ctsb = np.sum(i_srcph_softe * i_srcph_bgaper * iuse)
        #counts in the hard energy band in the source aperture
        cths = np.sum(i_srcph_harde * i_srcph_srcaper * iuse)
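        #The matching above splits each ID into a whole and a fractional part
        #so equal-width histograms can emulate IDL's reverse_indices lookup:
        #e.g. ID 1234.02 -> whole part 1234 selects the source group, and the
        #fractional part 0.02, binned with width id_step, locates the entry
        #in each list.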
names=["#i_srcph_harde","i_srcph_srcaper","iuse","source_id","x","y"],overwrite=True) #counts in the hard energy band in the background aperture cthb = np.sum(i_srcph_harde * i_srcph_bgaper * iuse) #ascii.write([i_srcph_harde,i_srcph_bgaper,iuse,source_id,x,y],results_dir+"cthb_main.txt", # names=["#i_srcph_harde","i_srcph_bgaper","iuse","source_id","x","y"],overwrite=True) #nsrc=0 nsrc=np.sum(i_src_valid) use_bgs=True frac_soft=0.5 frac_hard=1.-frac_soft #print("use_bgs, use_bgh",use_bgs,use_bgh) #calculate background counts if use_bgh and use_bgs: srcs_bg=np.sum(src_bg_ctss * i_src_valid) srch_bg=np.sum(src_bg_cths * i_src_valid) elif use_bgh: #if using a single exposure map, assign weights to soft and hard rates srcs_bg = frac_soft*np.sum(src_bg_cths * i_src_valid) srch_bg = frac_hard*np.sum(src_bg_cths * i_src_valid) elif use_bgs: srcs_bg=frac_soft*np.sum(src_bg_ctss * i_src_valid) srch_bh=frac_hard*np.sum(src_bg_ctss * i_src_valid) else: srcs_bg_det = (softbgrate_det/(3600.**2.)) * np.sum(src_area_exp_det * i_src_valid) srcs_bg_sky = (softbgrate_sky/(3600.**2.)) * np.sum(src_area_exp_sky * i_src_valid) srch_bg_det = (hardbgrate_det/(3600.**2.)) * np.sum(src_area_exp_det * i_src_valid) srch_bg_sky = (hardbgrate_sky/(3600.**2.)) * np.sum(src_area_exp_sky * i_src_valid) srcs_bg = srcs_bg_det + srcs_bg_sky srch_bg = srch_bg_det + srch_bg_sky #compute indices - moved here from earlier in stack_id - SG 08/07 indices=np.arange(np.max(src_nph)) #make arrays to say how many source and background photons you have #print("idlist",idlist) if ind_cts: imatch=np.where(ind_match!=-1)[0] imatchi=ind_match[imatch] for ivi in np.arange(len(idlist)): iv = ind_match[ivi] #print("ivi",ivi, iv, ind_match[ivi], idlist[ivi]) if iv==-1: ind_ctss[ivi] = 0. ind_cths[ivi] = 0. else: iv=iv.astype(int) if src_nph[iv]>0: id0iv = id0[iv] + indices[0:src_nph[iv]] ind_ctss[ivi] = np.sum(i_srcph_srcaper[id0iv]*i_srcph_softe[id0iv]*iuse[id0iv]) #print("ind ctss",i_srcph_srcaper[id0iv],i_srcph_softe[id0iv],iuse[id0iv]) ind_cths[ivi] = np.sum(i_srcph_srcaper[id0iv]*i_srcph_harde[id0iv]*iuse[id0iv]) ##this is for flux ind_ctss_flux[ivi] = np.sum(i_flux_from_soft[id0iv]*i_srcph_srcaper[id0iv]*i_srcph_softe[id0iv]*iuse[id0iv]) ind_cths_flux[ivi] = np.sum(i_flux_from_soft[id0iv]*i_srcph_srcaper[id0iv]*i_srcph_harde[id0iv]*iuse[id0iv]) ind_ctsb[ivi] = np.sum(i_srcph_bgaper[id0iv]*i_srcph_softe[id0iv]*iuse[id0iv]) ind_cthb[ivi] = np.sum(i_srcph_bgaper[id0iv]*i_srcph_harde[id0iv]*iuse[id0iv]) #print("ind_ctss[ivi]",ind_ctss[ivi],ind_cths[ivi],ind_ctsb[ivi],ind_cthb[ivi]) #for individual sources, calculate: #background from separate bg maps imatch=imatch.astype(int) imatchi=imatchi.astype(int) #print("imatch, imatchi",imatch, imatchi) #print("src_exp_sky",src_exp_sky,src_exp_sky[imatchi]) if use_bgh and use_bgs: ind_bgss[imatch] = src_bg_ctss[imatchi] ind_bghs[imatch] = src_bg_cths[imatchi] elif use_bgh: #background from a single map with weights ind_bgss[imatch] = frac_soft * src_bg_cths[imatchi] ind_bghs[imatch] = frac_hard * src_bg_cths[imatchi] elif use_bgs: ind_bgss[imatch] = frac_soft * src_bg_ctss[imatchi] ind_bghs[imatch] = frac_hard * src_bg_ctss[imatchi] else: '''AVERAGE background calculated from exposure calculate the detector and sky background separately, for calibration''' ind_bgss_sky[imatch] = (softbgrate_sky * src_area_exp_sky[imatchi]) / (3600.**2.) ind_bgss_det[imatch] = (softbgrate_det * src_area_exp_det[imatchi]) / (3600.**2.) ind_bghs_sky[imatch] = (hardbgrate_sky * src_area_exp_sky[imatchi]) / (3600.**2.) 
            ind_bghs_det[imatch] = (hardbgrate_det * src_area_exp_det[imatchi]) / (3600.**2.)
            ind_bgss[imatch] = ind_bgss_sky[imatch] + ind_bgss_det[imatch]
            ind_bghs[imatch] = ind_bghs_sky[imatch] + ind_bghs_det[imatch]
        ind_exp_sky[imatch] = src_exp_sky[imatchi]
        ind_exp_det[imatch] = src_exp_det[imatchi]
        #now record whether the inputs pass all of the criteria
        ind_ilim[imatch] = i_src_valid_lim[imatchi]
        ind_imdist[imatch] = i_src_valid_mdist[imatchi]
        ind_igood[imatch] = i_src_good_obsid[imatchi]
        ind_iobs[imatch] = i_src_matched_srcobs[imatchi]
        ind_srcrad[imatch] = srcradv[imatchi]
        ind_mdist[imatch] = mdistv[imatchi]
        ind_obsid[imatch] = obsidv[imatchi]
        ind_istack[imatch] = i_src_valid_mdist[imatchi] * i_src_valid_lim[imatchi] * \
                             i_src_good_obsid[imatchi] * i_src_idlist[imatchi] * \
                             i_src_matched_srcobs[imatchi]
        '''Fix the NaNs in the exposure arrays from sources not included in
        the stacks'''
        inans=np.where(np.isnan(ind_exp_sky))[0]
        ind_exp_sky[inans]=0.
        inans=np.where(np.isnan(ind_exp_det))[0]
        ind_exp_det[inans]=0.
        exp_sky_tot = np.sum(ind_exp_sky * ind_istack)
        exp_det_tot = np.sum(ind_exp_det * ind_istack)
    else:
        exp_sky_tot=0
        exp_det_tot=0
    if ind_cts:
        '''NaNs from sources not in the field of view are a problem, so just
        set them to zero'''
        inans=np.where(np.isnan(bg_in_srcaper_soft))[0]
        bg_in_srcaper_soft[inans]=0.
        bg_in_srcaper_hard[inans]=0.
        #Now calculate the total backgrounds for all of the valid sources
        ifoo=np.where((i_src_valid).astype(bool))[0]
        srcs_bg_obs = np.sum(bg_in_srcaper_soft[ifoo])#*0.9
        srch_bg_obs = np.sum(bg_in_srcaper_hard[ifoo])#*0.95
        #and rewrite the old variables
        srcs_bg=srcs_bg_obs
        srch_bg=srch_bg_obs
        '''Scale up the bg arrays to the same size as ind_istack; ifoo and
        matchesv are the two variables that allow the array to be constructed.
        Zero out ind_ctsb and ind_cthb because those values are essentially
        useless, and rewrite.'''
    else:
        srcs_bg, srch_bg, srcs_bg_obs, srch_bg_obs = 0.,0.,0.,0.
    #subtract the background (regardless of how it was computed)
    srcs = ctss - srcs_bg #net_cts = src_cts - bg_cts * (src_area) / (bg_area)
    srch = cths - srch_bg
    #incomplete - needs a check
    if srcs<=0:
        hr=(srch-srcs_bg_obs)/(srch+srcs)
    elif srch<=0:
        hr=(srch_bg_obs-srcs_bg_obs)/(srch_bg_obs+srcs)
    else:
        hr=(srch-srcs_bg_obs)/(srch+srcs)
    if nsrc==0:
        rates = 0.
        rateh = 0.
        fluxs = 0.
        fluxh = 0.
        exp_sky_avg = 0
        exp_det_avg = 0
        errrates = 0.
        errrateh = 0.
        errfluxs = 0.
        errfluxh = 0.
    else:
        rates = srcs / nsrc
        rateh = srch / nsrc
        #flux = net_cts * src_conversion / total_sky_exp
        fluxs = np.sum(srcs) / np.sum(exp_sky_tot)
        fluxh = np.sum(srch) / np.sum(exp_sky_tot)
        exp_sky_avg = exp_sky_tot / nsrc
        exp_det_avg = exp_det_tot / nsrc
        #Poisson errors in the rate (Gehrels 1986); moved into this branch to
        #avoid dividing by nsrc=0
        errrates = (np.sqrt(ctss + 0.75) + 1.) / nsrc
        errrateh = (np.sqrt(cths + 0.75) + 1.) / nsrc
        #Poisson errors in the flux
        errfluxs = errrates/rates * fluxs
        errfluxh = errrateh/rateh * fluxh
    #find the counts for each individual source:
    indsy=np.where(i_src_valid)[0]
    ind_rates,ind_rateh,ind_ctspss,ind_ctspsh=np.full(len(ind_bghs),-99.),np.full(len(ind_bghs),-99.),\
        np.full(len(ind_bghs),-99.),np.full(len(ind_bghs),-99.)
    ind_soft_flux,ind_hard_flux=np.full(len(ind_bghs),-99.),np.full(len(ind_bghs),-99.)
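    #The per-source errors below use the same Gehrels (1986) upper-limit
    #approximation: sigma(N) ~ sqrt(N + 0.75) + 1; e.g. N = 100 gives ~11.04,
    #slightly more conservative than sqrt(N) = 10.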
    ind_errrates,ind_errrateh,ind_errctspss,ind_errctspsh=np.full(len(ind_bghs),-99.),np.full(len(ind_bghs),-99.),\
        np.full(len(ind_bghs),-99.),np.full(len(ind_bghs),-99.)
    #ind_rates and ind_ctss are as long as the source list; bg_in_srcaper and
    #i_src_valid have the length of the number of sources with matches
    ig1,need_ind,ig2=np.intersect1d(sources_id,idv,return_indices=True)
    ind_rates[need_ind[indsy]] = ind_ctss[need_ind[indsy]] - bg_in_srcaper_soft[indsy]
    ind_rateh[need_ind[indsy]] = ind_cths[need_ind[indsy]] - bg_in_srcaper_hard[indsy]
    ind_errrates[need_ind[indsy]] = (np.sqrt(abs(ind_ctss[need_ind[indsy]]) + 0.75) + 1.)
    ind_errrateh[need_ind[indsy]] = (np.sqrt(abs(ind_cths[need_ind[indsy]]) + 0.75) + 1.)
    ind_ctspss[need_ind[indsy]] = ind_rates[need_ind[indsy]]/ind_exp_sky[need_ind[indsy]]
    ind_ctspsh[need_ind[indsy]] = ind_rateh[need_ind[indsy]]/ind_exp_sky[need_ind[indsy]]
    ind_errctspss[need_ind[indsy]] = ind_errrates[need_ind[indsy]]/abs(ind_rates[need_ind[indsy]]) * ind_ctspss[need_ind[indsy]]
    ind_errctspsh[need_ind[indsy]] = ind_errrateh[need_ind[indsy]]/abs(ind_rateh[need_ind[indsy]]) * ind_ctspsh[need_ind[indsy]]
    ind_soft_flux[need_ind[indsy]] = (ind_ctss_flux[need_ind[indsy]] - bg_in_srcaper_soft_flux[indsy])/ind_exp_sky[need_ind[indsy]]
    ind_hard_flux[need_ind[indsy]] = (ind_cths_flux[need_ind[indsy]] - bg_in_srcaper_hard_flux[indsy])/ind_exp_sky[need_ind[indsy]]
    #Part 9: Final Output
    '''This is the part of the code in which the output data that has already
    been computed is actually placed into the output tables and returned.'''
    varso = ['net_ctss', 'net_cths', 'ctsb', 'cthb', 'srcs', 'srch', 'hr',
             'nsrc', 'rates', 'rateh', 'errrates', 'errrateh',
             'count_per_ss', 'count_per_sh', 'errcntpss', 'errcntpsh']
    varso=['out_'+i for i in varso]
    all_src_len=1
    #the 'count_per_s*' columns hold the flux-like quantities (counts per second)
    data = Table([np.full(all_src_len,ctss), np.full(all_src_len,cths), np.full(all_src_len,ctsb),
                  np.full(all_src_len,cthb), np.full(all_src_len,srcs), np.full(all_src_len,srch),
                  np.full(all_src_len,hr), np.full(all_src_len,nsrc), np.full(all_src_len,rates),
                  np.full(all_src_len,rateh), np.full(all_src_len,errrates),
                  np.full(all_src_len,errrateh), np.full(all_src_len,fluxs),
                  np.full(all_src_len,fluxh), np.full(all_src_len,errfluxs),
                  np.full(all_src_len,errfluxh)],
                 names=varso)
    data2=None #returned as None when ind_cts is False
    if ind_cts:
        #put everything inside the data structure
        ind_vars = ["ID",'cts_w_bg_s', 'cts_w_bg_h', 'ctsb', 'cthb',
                    "net_count_s", "net_count_h", "err_netc_s", "err_netc_h",
                    'cnt_ps_s', 'cnt_ps_h', 'err_cnt_ps_s', 'err_cnt_ps_h',
                    "soft_flux","hard_flux", 'bgss', 'bghs','srcs_flux','srch_flux',
                    'bgss_sky', 'bgss_det', 'bghs_sky', 'bghs_det',
                    'srcrad', 'mdist', 'obsid', 'istack', 'igood', 'ilim',
                    'imdist', 'exp_det', 'exp_sky', 'match']
        ind_vars=['ind_'+i for i in ind_vars]
        data2 = Table([sources_id,ind_ctss, ind_cths, ind_ctsb, ind_cthb,
                       ind_rates,ind_rateh, ind_errrates,ind_errrateh,
                       ind_ctspss,ind_ctspsh, ind_errctspss,ind_errctspsh,
                       ind_soft_flux,ind_hard_flux, ind_bgss, ind_bghs,
                       ind_srcs_flux,ind_srch_flux, ind_bgss_sky, ind_bgss_det,
                       ind_bghs_sky, ind_bghs_det, ind_srcrad, ind_mdist,
                       ind_obsid, ind_istack, ind_igood, ind_ilim, ind_imdist,
                       ind_exp_det, ind_exp_sky, ind_match],
                      names=ind_vars)
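        #data holds a single row of stack-level totals; data2 holds one row
        #per input source, e.g. net counts (ind_rates) over sky exposure
        #(ind_exp_sky) give the per-source count rate ind_ctspss: 5 net
        #counts over 50 ks -> 1e-4 counts/s.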
    #data=hstack([data, data2])
    #print out the results:
    if "sublist" in locals():
        if len(sublist)!=0:
            #Find the photons associated with the objects in sublist
            i_imgstack = iuse*0.
            for tt in np.arange(len(sublist)):
                igd=np.where(id==sublist[tt])[0]
                if len(igd)>0:
                    i_imgstack[igd]=1
    else:
        i_imgstack=iuse*0+1
    #Now output the stacked images:
    if len(stacked_outimage)!=0:
        file_0=stacked_outimage.split(".")[0]
        #soft (0.5-2 keV) band image
        iplot=np.where(iuse*i_srcph_softe*i_imgstack)[0]
        res,rev_indo,ed=pidl_funcs.hist_nd(np.array([x[iplot],y[iplot]]),
                                           [(-1.)*boxsize/2.,(-1.)*boxsize/2.],
                                           [boxsize/2.,boxsize/2.],[1,1])
        szres=res.shape
        xcnt=int((szres[0]+1.)/2.)
        ycnt=int((szres[1]+1.)/2.)
        subres=res[xcnt-15:xcnt+15,ycnt-15:ycnt+15]
        #write the fits file
        hdu_new=fits.PrimaryHDU(subres)
        hdu_new.writeto(file_0+'_05_2.fits',overwrite=True)
        #hard (2-7 keV) band image
        iplot=np.where(iuse*i_srcph_harde*i_imgstack)[0]
        res,rev_indo,ed=pidl_funcs.hist_nd(np.array([x[iplot],y[iplot]]),
                                           [(-1.)*boxsize/2.,(-1.)*boxsize/2.],
                                           [boxsize/2.,boxsize/2.],[1,1])
        szres=res.shape
        xcnt=int((szres[0]+1.)/2.)
        ycnt=int((szres[1]+1.)/2.)
        subres=res[xcnt-15:xcnt+15,ycnt-15:ycnt+15]
        hdu_new=fits.PrimaryHDU(subres)
        hdu_new.writeto(file_0+'_2_7.fits',overwrite=True)
    return data,data2
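#Compact standalone sketch of the stack-level bookkeeping performed by
#sf_newstack_id: background-subtracted net counts, the mean per-source rate,
#and a Gehrels (1986) error. Purely illustrative; the argument names are
#hypothetical and no aperture/exposure weighting is applied here.
def _demo_stack_summary(cts_src, cts_bg_scaled, nsrc):
    net = cts_src - cts_bg_scaled           #net counts in the stack
    rate = net / nsrc if nsrc > 0 else 0.   #mean net counts per source
    err = (np.sqrt(cts_src + 0.75) + 1.) / nsrc if nsrc > 0 else 0.
    return net, rate, err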