trnspr1.py – treatment of sample transparency aberration (mode 1)
Implementation of a deconvolutional treatment of the sample-transparency aberration in XRD data collected from a finite-thickness specimen [Ida, 2022].
The Python code “trnspr1.py” is shown below.
# -*- coding: utf-8 -*-
##########################################
# trnspr1.py
# to treat Sample-Transparency Aberration
# originally coded by T. Ida
# modified by T. Ida, February 9, 2022
# modified by T. Ida, December 17, 2023
##########################################
import numpy as np # import "numpy" module as "np"
from scipy import interpolate # import "interpolate" from "scipy"
from scipy import special # import "special" from "scipy"
import os.path # import "os.path" module
import configparser # import "configparser"
import common5 as cmn # import "common5.py" as "cmn"
########################################################
# calc_kappas()
# Calculate Cumulants of Sample-Transparency Aberration
########################################################
def calc_kappas(degTwot, cfg):
# degTwot: 2Theta in [deg.] (NumPy array)
# cfg=[muInv,gonioR,specT,specW,degDS,alphaTrans,marginTrans,skipTrans,error_type]
muInv = cfg[0]
gonioR = cfg[1]
specT = cfg[2]
t_R = specT/gonioR
muR = gonioR/muInv
theta = degTwot * np.pi / 360 # (NumPy array)
u = 2 * t_R * np.cos(theta) # (NumPy array)
g = np.sin(2 * theta) / (2 * muR) # (NumPy array)
exp_u_g = np.exp(-u/g) # (NumPy array)
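    # u: truncation width caused by the finite thickness, and
    # g: attenuation decay constant, both on the scale of the
    #    aberration function (cf. the recursion below)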
# Calculate Raw (Non-Central) Moments up to 4th order
    # Note: this recursion was erroneous in a former version
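    # The recursion below follows by partial integration, assuming the
    # aberration profile f(chi) = (1/g)*exp(chi/g) for -u <= chi <= 0:
    #   s_n = int_{-u}^{0} chi^n f(chi) dchi
    #       = -(-u)^n exp(-u/g) - n*g*s_(n-1)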
s0 = 1 - exp_u_g
s1 = -(-u)*exp_u_g - g*s0
s2 = -(-u)**2*exp_u_g - g*2*s1
s3 = -(-u)**3*exp_u_g - g*3*s2
s4 = -(-u)**4*exp_u_g - g*4*s3
m1 = s1 / s0
m2 = s2 / s0
m3 = s3 / s0
m4 = s4 / s0
# Calculate Cumulants up to 4th order
kappa1 = m1
kappa2 = m2 - m1**2
kappa3 = m3 - 3*m2*m1 + 2*m1**3
kappa4 = m4 - 4*m3*m1 - 3*m2**2 + 12*m2*m1**2 - 6*m1**4
# Convert to Reduced Cumulants in [deg.]
kappa1 = 180.0/np.pi*kappa1
kappa2 = np.sign(kappa2)*180.0/np.pi*np.sqrt(abs(kappa2))
kappa3 = 180.0/np.pi*np.sign(kappa3)*np.abs(kappa3)**(1/3)
kappa4 = 180.0/np.pi*np.sign(kappa4)*np.abs(kappa4)**(1/4)
    kappas = np.array([s0, kappa1, kappa2, kappa3, kappa4])
    return kappas
    # returns kappas: 2D NumPy array
    # [s0, kappa1, kappa2, kappa3, kappa4]
#####################################
# calc_sym_kappas()
# Calculate Cumulants of Untruncated
# Sample-Transparency Aberration
#####################################
def calc_sym_kappas(degTwot, cfg):
# degTwot: 2Theta in [deg.] (NumPy array)
# cfg=[muInv,gonioR,specT,specW,degDS,alphaTrans,marginTrans,skipTrans,error_type]
muInv = cfg[0]
gonioR = cfg[1]
specT = cfg[2]
t_R = specT/gonioR
muR = gonioR/muInv
nSize = degTwot.size
theta = degTwot * np.pi / 360 # (NumPy array)
u = 2 * t_R * np.cos(theta) # (NumPy array)
g = np.sin(2 * theta) / (2 * muR) # (NumPy array)
    exp_u_g = 0.0 # limit of np.exp(-u/g) as u -> infinity (untruncated)
# Calculate Raw (Non-Central) Moments up to 4th order
    # Note: these expressions were erroneous in a former version
s0 = np.ones(nSize)
s1 = - g
s2 = - g*2*s1
s3 = - g*3*s2
s4 = - g*4*s3
m1 = s1 / s0
m2 = s2 / s0
m3 = s3 / s0
m4 = s4 / s0
# Calculate Cumulants up to 4th order
kappa1 = m1
kappa2 = m2 - m1**2
kappa3 = m3 - 3*m2*m1 + 2*m1**3
kappa4 = m4 - 4*m3*m1 - 3*m2**2 + 12*m2*m1**2 - 6*m1**4
# Convert to Reduced Cumulants in [deg.]
kappa1 = 180.0/np.pi*kappa1
kappa2 = 180.0/np.pi*np.sqrt(kappa2)
kappa3 = 180.0/np.pi*np.sign(kappa3)*np.abs(kappa3)**(1/3)
kappa4 = 180.0/np.pi*np.sign(kappa4)*np.abs(kappa4)**(1/4)
    kappas = np.array([s0, kappa1, kappa2, kappa3, kappa4])
    return kappas
    # returns kappas: 2D NumPy array
    # [s0, kappa1, kappa2, kappa3, kappa4]
############################################################
# Definition of Fourier transform of deconvolution function
# As the instrumental function is "exp(chi) [chi < 0]",
# Fourier transform should be
# int_{-inf}^{0} exp(chi) * exp(2πi xi chi) dchi
# = int_{-inf}^{0} exp((1 + 2πi xi) chi) dchi
# = [exp((1 + 2πi xi) chi) / (1 + 2πi xi)]_{chi=-inf}^{chi=0}
# = 1 / (1 + 2πi xi)
# NOTE: it depends on definition of the Fourier transform
############################################################
def fFtDeconv(xi):
ans = 1.0 / (1.0 + 2 * np.pi * 1j * xi)
return ans
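# NOTE: fFtDeconv() gives the analytical form for reference; it is not
# called within this module, where the Fourier transform of the decay
# function is instead constructed numerically in Step 2-6 below.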
################################################
# "trnspr.treat"
# TREATMENT OF SAMPLE-TRANSPARENCY, MAIN BODY
################################################
def treat(sign, source, kappas, cfg):
# sign: -1 or 1
# source: Source Data (Python List)
# [s_angle, s_intensity],
# [s_angle, s_intensity, s_error1], or
# [s_angle, s_intensity, s_error1, s_error2]
# kappas: Cumulants(NumPy array)
# cfg: Configuration Parameters (Python list)
# cfg=[muInv,gonioR,specT,specW,degDS,alphaTrans,marginTrans,skipTrans,error_type]
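    # NOTE: the arguments "sign" and "kappas" are not referenced in this
    # function body; they appear to be retained for interface
    # compatibility with the other treatment modules.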
#
#******************************
# Configuration from "dct.cfg"
#******************************
    muInv = cfg[0] # Penetration Depth
    gonioR = cfg[1] # Goniometer Radius
    specT = cfg[2] # Specimen Thickness
    specW = cfg[3] # Specimen Width
    degDS = cfg[4] # Divergence-Slit Open Angle in [deg.]
    alphaTrans = cfg[5] # Shape Parameter
    marginTrans = cfg[6] # Margin for FFT
    skipTrans = cfg[7] # Skip Flag
    error_type = cfg[8] # Error Type
# muR = (linear attenuation coefficient)*(goniometer radius)
muR = gonioR / muInv
    # t_R = (sample thickness)/(goniometer radius)
t_R = specT / gonioR
##############################################################
# Stage 1: Compensation of the truncation by finite thickness
# (Recovers the intensity lost by pass-through effect)
# NOTE: NOT a Fourier analysis
##############################################################
#******************************************************
# Step 1-0: Divide Python List "source" to numpy.array
#******************************************************
s_angle = np.array(source[0])
s_intensity = np.array(source[1])
    if ((error_type == '1') or (error_type == '3')):
        s_error1 = np.array(source[2])
    if (error_type == '2'):
        s_error2 = np.array(source[2])
    elif (error_type == '3'):
        s_error2 = np.array(source[3])
d_intensity = np.array(s_intensity)
#****************************************************
# Step 1-1: Scale Transform for the finite thickness
# (pass-through effect)
# s_angle (2Theta in deg.) => chiS0
# s_intensity => etaS0
#****************************************************
vEPS = 1.0E-6
nPoint = s_angle.size
theta = s_angle * np.pi / 360
# theta: (NumPy array)
# [s_angle[0]*pi/360,...,s_angle[nPoint-1]*pi/360]
chiS0 = 1.0/t_R*np.log(2/(1-np.tan(theta/2))-1)
# chiS0: (NumPy array)
# [chiS0[0],...,chiS0[nPoint-1]]
u = 2*t_R*np.cos(theta)
# u: (NumPy array)
# [2*(t/R)*cos(theta[0]),...,2*(t/R)*cos(theta[nPoint-1])]
g = np.sin(2*theta) / (2 * muR)
# g: (NumPy array)
# [sin(2*theta[0])/(2*muR),...,sin(2*theta[nPoint-1])/(2*muR)]
nChi = np.ceil(np.log(g[nPoint-1]/u[nPoint-1]/vEPS))
nChi = nChi.astype(np.int64)
# Ordinate Corrections
# (Lorentz, polarization and powder diffractometry corrections)
source_corr = u * cmn.g_corr(s_angle / 2)
# Corrected & Transformed Intensity
etaS0 = s_intensity * source_corr
# Preparation for error treatment
if ((error_type == '1') or (error_type == '3')):
etaS0_error1 = s_error1 * source_corr
if ((error_type == '2') or (error_type == '3')):
etaS0_error2 = s_error2 * source_corr
#*********************************************
# Step 1-2: Compensation for Finite thickness
# (NOT Fourier treatment)
# Note: Single interpolation treatment
#*********************************************
# for iChi in range(nChi): # for iChi = 0 to nChi-1
# chiS_i = chiS - iChi - 1 # shifted by -1, -2, -3, ...
# etaS_i = etaS0 * exp(-u/g * (iChi+1) ) # attenuate
# fS = interpolate.interp1d(chiS_i, etaS_i, kind="linear")
# etaS1 += fS(chiS)
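    # The vectorized code below runs the shift index iChi from 0 to nChi-1:
    # the iChi = 0 row carries zero shift and unit weight, so it reproduces
    # etaS0 itself, and the sum over rows equals etaS0 plus the attenuated,
    # shifted copies described in the loop sketch above.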
iChi = np.arange(nChi).repeat(nPoint).reshape(nChi,nPoint)
# arange(nChi): [0,1,...,nChi-1]
# .repeat(nPoint): [0,...,0,1,...,1,...,nChi-1,...nChi-1]
# .reshape(nChi,nPoint):
# [[0,...,0],[1,...,1],...,[nChi-1,...nChi-1]]
chiS_i = np.tile(chiS0,nChi).reshape(nChi,nPoint)
# chiS_i:
# [[chiS0[0],...,chiS0[nPoint-1]],
# [chiS0[0],...,chiS0[nPoint-1]],
# ...,
# [chiS0[0],...,chiS0[nPoint-1]]]
chiS_i -= iChi
    # chiS_i:
    # [[chiS0[0],...,chiS0[nPoint-1]],
    #  [chiS0[0]-1,...,chiS0[nPoint-1]-1],
    #  ...,
    #  [chiS0[0]-nChi+1,...,chiS0[nPoint-1]-nChi+1]]
chiS_i[:,nPoint-1] = chiS0[nPoint-1]
    # chiS_i:
    # [[chiS0[0],...,chiS0[nPoint-2],chiS0[nPoint-1]],
    #  [chiS0[0]-1,...,chiS0[nPoint-2]-1,chiS0[nPoint-1]],
    #  ...,
    #  [chiS0[0]-nChi+1,...,chiS0[nPoint-2]-nChi+1,chiS0[nPoint-1]]]
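    # Pinning the last abscissa of every row to chiS0[nPoint-1] keeps the
    # interpolation domain covering all of chiS0; otherwise interp1d()
    # would raise an out-of-range error for the shifted rows.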
etaS_i = np.tile(etaS0,nChi).reshape(nChi,nPoint)
# etaS_i:
# [[etaS0[0],...,etaS0[nPoint-1]],
# [etaS0[0],...,etaS0[nPoint-1]],
# ...,
# [etaS0[0],...,etaS0[nPoint-1]]]
exp_i = np.exp(-iChi*(np.tile(u/g, nChi).reshape(nChi,nPoint)))
# exp_i:
# [[1,...,1],
# [exp(-(u/g)[0]),...,exp(-(u/g)[nPoint-1])],
# ...,
# [exp(-(nChi-1)*(u/g)[0]),...,exp(-(nChi-1)*(u/g)[nPoint-1])]]
etaS_i = etaS_i * exp_i
etaS1_i = etaS_i
for i in range(nChi):
fS = interpolate.interp1d(chiS_i[i], etaS_i[i])
etaS1_i[i] = fS(chiS0)
etaS1 = np.sum(etaS1_i, axis=0)
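    # Propagate the error estimates through the same shift-and-attenuate
    # sum as the intensities, combining the contributions in quadrature.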
if ((error_type == '1') or (error_type == '3')):
etaS_error1_i = np.tile(etaS0_error1,nChi).reshape(nChi,nPoint)
etaS_error1_i = etaS_error1_i * exp_i
etaS1_error1_i = etaS_error1_i
for i in range(nChi):
fS = interpolate.interp1d(chiS_i[i], etaS_error1_i[i])
            etaS1_error1_i[i] = fS(chiS0)
sq_etaS1_error1_i = etaS1_error1_i**2
etaS1_error1 = np.sqrt(np.sum(sq_etaS1_error1_i,axis=0))
etaS1_error1 = etaS1_error1 / source_corr
if ((error_type == '2') or (error_type == '3')):
etaS_error2_i = np.tile(etaS0_error2,nChi).reshape(nChi,nPoint)
etaS_error2_i = etaS_error2_i * exp_i
etaS1_error2_i = etaS_error2_i
for i in range(nChi):
fS = interpolate.interp1d(chiS_i[i], etaS_error2_i[i])
            etaS1_error2_i[i] = fS(chiS0)
sq_etaS1_error2_i = etaS1_error2_i**2
etaS1_error2 = np.sqrt(np.sum(sq_etaS1_error2_i,axis=0))
etaS1_error2 = etaS1_error2 / source_corr
#**********************************************
# Step 1-3: Rescale transform
# (chiS, etaS1) => (s_angle, d_intensity)
# (chiS, etaS1_error1) => (s_angle, d_error1)
# (chiS, etaS1_error2) => (s_angle, d_error2)
#**********************************************
d_intensity = etaS1 / source_corr
dest = source
dest[1] = d_intensity
if (error_type == '1'):
dest[2] = etaS1_error1
elif (error_type == '2'):
dest[2] = etaS1_error2
elif (error_type == '3'):
dest[2] = etaS1_error1
dest[3] = etaS1_error2
#*******************************************************
# Step 1-4 Pass "dest" as "source" for the 2nd stage ...
#*******************************************************
source = dest
##########################################
# Stage 2: Correction of the Extinction
# (Symmetrization about decay effect)
# NOTE: Fourier Treatment
##########################################
#******************************************************
# Step 2-0: Divide Python List "source" to numpy.array
    # Note: this step was missing in some previous versions
#******************************************************
s_angle = np.array(source[0])
s_intensity = np.array(source[1])
if (error_type == '1'):
s_error1 = np.array(source[2])
elif (error_type == '2'):
s_error2 = np.array(source[2])
elif (error_type == '3'):
s_error1 = np.array(source[2])
s_error2 = np.array(source[3])
d_intensity = np.array(s_intensity)
#********************************************************
# Step 2-1: Scale Transform for the decay, "chiT"
# (s_angle, s_intensity) => (source_x, source_y)
#********************************************************
# Abscissa values (Non-equidistantly separated) (chiT)
    source_chiT = 2 * muR * np.log(np.tan(theta))
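    # On the chiT scale the decay caused by transparency takes the fixed
    # form exp(chi) for chi < 0, independent of 2Theta (see the note above
    # fFtDeconv()), so it can be removed by a single deconvolution.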
# Ordinate correction factor
source_corr = g * cmn.g_corr(s_angle / 2)
# Corrected intensity (etaT)
source_etaT = s_intensity * source_corr
#******************************************************************
# Step 2-2: Preparation for Fourier treatment
# Determine Marginal (Padding) Range, Total Points & Step Interval
#******************************************************************
marginModel = 1
# margin: defines marginal (padding) range
margin = marginTrans * marginModel
    # marginTrans is the value read from the file "dct.cfg".
    # The user can change the value in the current version.
    # NOTE: the use of the same margin value may not be justified.
index = cmn.indices(source_chiT, margin)
    # Derive the total number and the 1st, 2nd, 3rd anchor points
    # cmn.indices(x, w_margin) is defined in "common5.py"
    # x[..] = source_chiT : abscissa (NumPy array)
    nSource = index[0] # number of source data points
    nValid = index[1] # index of 1st anchor point on interpolated abscissa
    i2 = index[2] # index of 2nd anchor point
    i3 = index[3] # index of 3rd anchor point
nDest = index[4] # total number of interpolated data
nDest_2 = int(nDest/2) # half of the total number
# Step Interval on "chiT" scale
dchiT = (source_chiT[nSource-1] - source_chiT[0]) / nValid
# Prepare equidistant data for valid range
nValid = int((source_chiT[nSource-1] - source_chiT[0]) / dchiT)
chi0 = source_chiT[0]
wchiT = nValid * dchiT
chiT = np.linspace(chi0, chi0 + wchiT, nValid, endpoint = False)
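    # The FFT applied in Step 2-5 assumes equidistant sampling; the measured
    # data, equidistant in 2Theta, are generally not equidistant on the
    # chiT scale, hence the interpolation in Step 2-3.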
#*********************************************************
# Step 2-3:
# Cubic spline interpolation of Data on Transformed Scale
# (source_chiT, source_etaT) => (chiT, etaT)
#*********************************************************
# Cubic spline interpolation
fT = interpolate.interp1d(source_chiT, source_etaT, kind="cubic")
etaT = fT(chiT) # (source_x, source_y) => (chiT, etaT)
#********************************************
# Step 2-4:
# Extrapolation for marginal (padding) range
# (chiT, etaT) => (chiT, etaT)
#********************************************
# Fill marginal range with extrapolated values
wchiT = nDest * dchiT
chiT = np.linspace(chi0, chi0 + wchiT, nDest, endpoint = False)
etaT = np.resize(etaT, nDest)
cmn.pad_margin(etaT, index)
#***********************************************
# Step 2-5: Fourier Transform of Intensity Data
# (etaT) => (ft_etaT)
#***********************************************
# Fourier transform of intensity
ft_etaT = np.fft.fft(etaT)
#***************************************************
# Step 2-6: Fourier Transform of Decay function
# => (ft_inst)
#***************************************************
    # Fourier transform of the function to be deconvolved
instT = np.linspace(-wchiT, 0, nDest)
instT = np.exp(instT)
ft_instT = np.fft.fft(instT)
ft_instT = ft_instT / np.abs(ft_instT)
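    # Normalizing ft_instT to unit modulus leaves only its phase factor,
    # so the division in Step 2-7 removes the asymmetry of the decay
    # function while preserving the amplitude spectrum: the profile is
    # symmetrized rather than sharpened.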
#************************************************
# Step 2-7: Decay Correction by Fourier method
# Deconvolutional Treatment on Fourier transform
# (ft_etaT)/(ft_instT) => (ft_zetaT)
#************************************************
ft_zetaT = ft_etaT / ft_instT
#*************************************
# Step 2-8: Inverse Fourier Transform
# (ft_zetaT) => (zetaT)
#*************************************
zetaT = np.real(np.fft.ifft(ft_zetaT))
#******************************************************
    # Step 2-9: Pick up Intensity from the shift-treated data
    # on the "chiT" scale by cubic spline interpolation
    # (chiT, zetaT) => (source_chiT, dest_etaT)
#*****************************************************
fT = interpolate.interp1d(chiT, zetaT, kind="cubic")
dest_etaT = fT(source_chiT)
#***********************************************
# Step 2-10: Rescale transform
    # (source_chiT, dest_etaT) => (s_angle, d_intensity)
#***********************************************
d_intensity = dest_etaT / source_corr
dest = source
dest[1] = d_intensity
#*******************************************************
# Step 2-11: Fourier Treatment of Error Data on Stage 2
#*******************************************************
dest = cmn.TreatErrors(error_type, source, source_chiT, index, source_corr, ft_instT)
return dest # Return Python list
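As a usage sketch (the numerical values below are hypothetical, and “common5.py” must be importable for “trnspr1” to be imported), the cumulants of the aberration can be inspected on a coarse 2Theta grid as follows:

import numpy as np
import trnspr1

# hypothetical configuration, in the order documented above:
# [muInv,gonioR,specT,specW,degDS,alphaTrans,marginTrans,skipTrans,error_type]
cfg = [0.05, 150.0, 0.5, 20.0, 1.0, 0.5, 1.0, '0', '0']
degTwot = np.linspace(20.0, 120.0, 11) # 2Theta values in [deg.]
kappas = trnspr1.calc_kappas(degTwot, cfg)
print(kappas[1]) # 1st reduced cumulant (peak shift) in [deg.]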