trnspr2.py – treatment of sample transparency aberration (mode 2)
Implementation of a deconvolutional treatment of the sample-transparency aberration in XRD data collected from a specimen of finite thickness and finite width [Ida, in preparation].
The Python code “trnspr2.py” is shown below.
Click (tap) the [Python] icon at the top-right edge of the code area to copy the code to the clipboard.
# -*- coding: utf-8 -*-
##########################################
# trnspr2.py
# to treat Sample-Transparency Aberration
# originally coded by T. Ida
# modified by T. Ida, September 27, 2022
# modified by T. Ida, April 6, 2024
##########################################
import numpy as np # import "numpy" module as "np"
from scipy import interpolate # import "interpolate" from "scipy"
from scipy import special # import "special" from "scipy"
import os.path # import "os.path" module
import configparser # import "configparser"
import common5 as cmn # import "common5.py" as "cmn"
########################################################
# calc_kappas()
# Calculate Cumulants of Sample-Transparency Aberration
########################################################
def calc_kappas(degTwot, cfg):
# degTwot: 2Theta in [deg.] (NumPy array)
# cfg=[muInv,gonioR,specT,specW,degDS,alphaTrans,marginTrans,skipTrans,error_type]
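    # Parameter meanings, as inferred from their use below (assumed):
    #   muInv:  penetration depth 1/mu of the specimen (same length unit as gonioR)
    #   gonioR: goniometer radius
    #   specT:  specimen thickness, specW (W): specimen width
    #   degDS:  divergence-slit open angle in [deg.]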
muInv = cfg[0]
gonioR = cfg[1]
specT = cfg[2]
W = cfg[3]
degDS = cfg[4]
t_R = specT/gonioR
muR = gonioR/muInv
theta = degTwot * np.pi/360 # (NumPy array)
cos_theta = np.cos(theta) # (NumPy array)
sin_theta = np.sin(theta) # (NumPy array)
tan_theta = sin_theta/cos_theta # (NumPy array)
R_phiDS = gonioR * degDS * np.pi/180 # (float)
Omega = R_phiDS / sin_theta # (NumPy array)
tau = 2*specT/tan_theta # (NumPy array)
Omega0 = (Omega+W)/2 - tau
Omega1 = (W-Omega)/2
Omega2 = W-tau
Omega3 = (W+Omega)/2
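    # Interpretation (assumed from the geometry): Omega is the width of the
    # irradiated area limited by the divergence slit, and tau is the surface
    # projection of the beam path through the full specimen thickness; the
    # combinations Omega0..Omega3 mark the case boundaries used below.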
g = cos_theta*sin_theta/muR # (NumPy array)
# Calculate Raw (Non-Central) Moments up to 4th order
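    # The raw moments of a truncated exponential,
    #   I_n = (1/g) * int_{-u}^{0} x**n * exp(x/g) dx,
    # satisfy the integration-by-parts recurrence
    #   I_n = -(-u)**n * exp(-u/g) - n*g*I_{n-1},
    # which the s0i..s5i, s01..s51, s03..s53 and s02..s52 ladders below implement.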
# Case (a) and (c)
u = 2*t_R*cos_theta # (NumPy array)
exp_u_g = np.exp(-u/g) # (NumPy array)
s0i = 1 - exp_u_g
s1i = -(-u)*exp_u_g - g*s0i
s2i = -(-u)**2*exp_u_g - g*2*s1i
s3i = -(-u)**3*exp_u_g - g*3*s2i
s4i = -(-u)**4*exp_u_g - g*4*s3i
s5i = -(-u)**5*exp_u_g - g*5*s4i
# Case (b)
u1 = Omega1*sin_theta/gonioR
exp_u1_g = np.exp(-u1/g) # (NumPy array)
s01 = 1 - exp_u1_g
s11 = -(-u1)*exp_u1_g - g*s01
s21 = -(-u1)**2*exp_u1_g - g*2*s11
s31 = -(-u1)**3*exp_u1_g - g*3*s21
s41 = -(-u1)**4*exp_u1_g - g*4*s31
s51 = -(-u1)**5*exp_u1_g - g*5*s41
# Case (c')
u3 = Omega3*sin_theta/gonioR
exp_u3_g = np.exp(-u3/g) # (NumPy array)
s03 = 1 - exp_u3_g
s13 = -(-u3)*exp_u3_g - g*s03
s23 = -(-u3)**2*exp_u3_g - g*2*s13
s33 = -(-u3)**3*exp_u3_g - g*3*s23
s43 = -(-u3)**4*exp_u3_g - g*4*s33
s53 = -(-u3)**5*exp_u3_g - g*5*s43
# Case (d)
u2 = W*sin_theta/gonioR
exp_u2_g = np.exp(-u2/g) # (NumPy array)
s02 = 1 - exp_u2_g
s12 = -(-u2)*exp_u2_g - g*s02
s22 = -(-u2)**2*exp_u2_g - g*2*s12
s32 = -(-u2)**3*exp_u2_g - g*3*s22
s42 = -(-u2)**4*exp_u2_g - g*4*s32
s52 = -(-u2)**5*exp_u2_g - g*5*s42
    # Five mutually exclusive cases, selected by the relative sizes of Omega, tau and W
case_a = (Omega+2*tau <= W)
case_b = ((np.maximum(Omega,2*tau-Omega) <= W)&(W < Omega+2*tau))
case_c = ((tau <= W) & (W < Omega))
case_cp = ((Omega <= W)&(W < 2*tau-Omega))
case_d = (W < np.minimum(Omega,tau))
fB0 = Omega3/Omega
fB1 = tau/(u*Omega)
fB2 = -Omega1/Omega
fB3 = -Omega1/(u1*Omega)
fC0 = 1.0
fC1 = tau/(u*W)
fCp0 = Omega3/Omega
fCp1 = Omega3/(Omega*u3)
fCp2 = -Omega1/Omega
fCp3 = -Omega1/(Omega*u1)
fD0 = 1.0
fD1 = 1/u2
    # Case-dependent coefficients f0..f7 for combining the moment ladders
f0 = np.where(case_a,1,np.where(case_b,fB0,np.where(case_c,fC0,0)))
f1 = np.where(case_a,0,np.where(case_b,fB1,np.where(case_c,fC1,0)))
f2 = np.where(case_b,fB2,np.where(case_cp,fCp2,0))
f3 = np.where(case_b,fB3,np.where(case_cp,fCp3,0))
f4 = np.where(case_d,fD0,0)
f5 = np.where(case_d,fD1,0)
f6 = np.where(case_cp,fCp0,0)
f7 = np.where(case_cp,fCp1,0)
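    # f0..f7 carry the case-dependent weights, so the moments s0..s4 can be
    # assembled below as single vectorized sums over all 2Theta points.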
#
s0 = f0*s0i + f1*s1i + f2*s01 + f3*s11 + f4*s02 + f5*s12 + f6*s03 + f7*s13
s1 = f0*s1i + f1*s2i + f2*s11 + f3*s21 + f4*s12 + f5*s22 + f6*s13 + f7*s23
s2 = f0*s2i + f1*s3i + f2*s21 + f3*s31 + f4*s22 + f5*s32 + f6*s23 + f7*s33
s3 = f0*s3i + f1*s4i + f2*s31 + f3*s41 + f4*s32 + f5*s42 + f6*s33 + f7*s43
s4 = f0*s4i + f1*s5i + f2*s41 + f3*s51 + f4*s42 + f5*s52 + f6*s43 + f7*s53
#
m1 = s1 / s0
m2 = s2 / s0
m3 = s3 / s0
m4 = s4 / s0
# Calculate Cumulants up to 4th order
kappa1 = m1
kappa2 = m2 - m1**2
kappa3 = m3 - 3*m2*m1 + 2*m1**3
kappa4 = m4 - 4*m3*m1 - 3*m2**2 + 12*m2*m1**2 - 6*m1**4
# Convert to Reduced Cumulants in [deg.]
kappa1 = 180.0/np.pi*kappa1
kappa2 = 180.0/np.pi*np.sqrt(kappa2)
kappa3 = 180.0/np.pi*np.sign(kappa3)*np.abs(kappa3)**(1/3)
kappa4 = 180.0/np.pi*np.sign(kappa4)*np.abs(kappa4)**(1/4)
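    # "Reduced" cumulant = sign-preserving n-th root of kappa_n, so that
    # kappa1..kappa4 all carry units of [deg.].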
    kappas = np.array([s0, kappa1, kappa2, kappa3, kappa4])
    return kappas
    # returns kappas: (2D NumPy array)
    # rows: [s0, kappa1, kappa2, kappa3, kappa4]
##############################
# Instrumental model function
##############################
def fOmega0(delTwot, gam, ups):
return np.where(delTwot<-ups,0.0,np.where(delTwot<0,np.exp(delTwot/gam)/gam,0.0))
def fOmega1(delTwot, gam, ups):
return np.where(delTwot<-ups,0.0,np.where(delTwot<0,(1+delTwot/ups)*np.exp(delTwot/gam)/gam,0.0))
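# fOmega0 is the exponential profile exp(x/gam)/gam truncated to -ups <= x < 0;
# fOmega1 is the same profile multiplied by the linear taper (1 + x/ups).
# These are the building blocks of the piecewise aberration profile in fInstr().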
def fInstr(degDelTwot, degTwot, cfg):
# degDelTwot: Delta 2Theta in [deg.] (NumPy array)
# degTwot: 2Theta in [deg.] (float)
# cfg=[muInv,gonioR,specT,specW,degDS,alphaTrans,marginTrans,skipTrans,error_type]
delTwot = degDelTwot * np.pi/180 # (NumPy array)
theta = degTwot * np.pi/360 # (float)
cos_theta = np.cos(theta) # (float)
sin_theta = np.sin(theta) # (float)
tan_theta = sin_theta/cos_theta # (float)
muInv = cfg[0]
R = cfg[1]
t = cfg[2]
W = cfg[3]
degDS = cfg[4]
muR = R/muInv
R_phiDS = R * degDS * np.pi/180 # (float)
Omega = R_phiDS/sin_theta # (float)
tau = 2*t/tan_theta # (float)
g = cos_theta*sin_theta/(muR) # (float)
u = 2*t*cos_theta/R
    # Assemble the aberration profile piecewise;
    # four cases: (a), (b), (b'), (c)
if (Omega + 2*tau < W): # case (a)
ans = fOmega0(delTwot, g, u)
elif (Omega < W): # case (b)
Omega0 = (W + Omega)/2 - tau
Omega1 = (W - Omega)/2
t1 = (W - Omega)*tan_theta/4
u1 = 2*t1*cos_theta/R
ans = Omega0 * fOmega0(delTwot, g, u)
ans += tau * fOmega1(delTwot, g, u)
ans -= Omega1 * fOmega1(delTwot, g, u1)
ans /= Omega
elif (tau < W): # case (b')
Omega2 = W - tau
ans = Omega2 * fOmega0(delTwot, g, u)
ans += tau * fOmega1(delTwot, g, u)
ans /= W
else: # case (c)
t2 = W*tan_theta/2
u2 = 2*t2*cos_theta/R
ans = fOmega1(delTwot, g, u2)
return ans
############################################################
# Definition of Fourier transform of deconvolution function
# As the instrumental function is "exp(chi) [chi < 0]",
# its Fourier transform with the kernel exp(2πi xi chi) is
# int_{-inf}^{0} exp(chi) * exp(2πi xi chi) dchi
# = int_{-inf}^{0} exp((1 + 2πi xi) chi) dchi
# = [exp((1 + 2πi xi) chi) / (1 + 2πi xi)]_{chi=-inf}^{chi=0}
# = 1 / (1 + 2πi xi)
# NOTE: numpy.fft applies the conjugate kernel exp(-2πi xi chi) in the
# forward transform, so fFtDeconv() below returns 1 / (1 - 2πi xi).
############################################################
def fDeconv(chi):
return np.where(chi<0,np.exp(chi),0)
def fFtDeconv(xi):
ans = 1.0 / (1.0 - 2 * np.pi * 1j * xi)
return ans
################################################
# "trnspr2.treat"
# TREATMENT OF SAMPLE-TRANSPARENCY, MAIN BODY
################################################
def treat(source, kappas, cfg):
# source: Source Data (Python List)
# [s_angle, s_intensity],
# [s_angle, s_intensity, s_error1], or
# [s_angle, s_intensity, s_error1, s_error2]
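    # kappas: cumulant array as returned by calc_kappas(s_angle, cfg) (assumed)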
# cfg=[muInv,gonioR,specT,specW,degDS,alphaTrans,marginTrans,skipTrans,error_type]
muInv = cfg[0]
gonioR = cfg[1]
specT = cfg[2]
specW = cfg[3]
degDS = cfg[4]
alphaTrans = cfg[5]
marginTrans = cfg[6]
skipTrans = cfg[7]
error_type = cfg[8]
t_R = specT/gonioR
muR = gonioR/muInv
modeTrans = '0' # Reciprocal expression
##############################################################
# Stage 1: Compensation of the truncation by finite thickness
# (Recovers the intensity lost by pass-through effect)
# NOTE: NOT a Fourier analysis
##############################################################
#******************************************************
# Step 1-0: Parse Python List "source" to numpy.array
#******************************************************
s_angle = np.array(source[0])
s_intensity = np.array(source[1])
    if ((error_type == '1') or (error_type == '3')):
        s_error1 = np.array(source[2])
    if (error_type == '2'):
        s_error2 = np.array(source[2])
    elif (error_type == '3'):
        s_error2 = np.array(source[3])
d_intensity = np.array(s_intensity)
#****************************************************
# Step 1-1: Scale Transform for 3rd cumulant, "chi3"
# s_angle (2Theta in deg.) => source_x
# s_intensity => source_y
#****************************************************
vEPS = 1.0E-6
nPoint = s_angle.size
kappa0 = kappas[0]
kappa1 = kappas[1]
kappa3 = np.sign(kappas[3])*np.abs(kappas[3])**3
# s0 = np.array(kappas[0])[0]
# k1 = np.array(kappas[1])[0]
# k2 = np.array(kappas[2])[0]
# k3 = np.array(kappas[3])[0]
# k3 = np.sign(k3)*np.abs(k3)**3
    k3 = -2.0 # 3rd cumulant of model profile on chi3 scale
dchi3_d2Theta = np.power(k3/kappa3, 1.0/3) # d(chi3)/d(2Theta)
source_x = np.cumsum( dchi3_d2Theta )
source_x *= s_angle[1] - s_angle[0]
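    # On the chi3 scale the 3rd cumulant of the aberration is constant (= k3),
    # so one 2Theta-independent kernel can deconvolve the whole pattern.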
    source_corr = cmn.g_corr(s_angle / 2) # geometrical correction
source_corr /= dchi3_d2Theta
source_corr2 = 1 / kappa0
source_y = s_intensity * source_corr * source_corr2
if ((error_type == '1') or (error_type == '2')):
source[2] *= source_corr2
elif (error_type == '3'):
source[3] *= source_corr2
#*********************************************
# Step 1-2: Preparation for Fourier treatment
#*********************************************
marginModel = 1.0
margin = marginTrans * marginModel
index = cmn.indices(source_x, margin)
nSource = index[0] # number of source data
    nValid = index[1] # number of valid destination data
i1 = index[2] # index for first division point
i2 = index[3] # index for second division point
nDest = index[4] # total number of destination data
nDest_2 = int(nDest/2)
dchi3 = (source_x[nSource-1] - source_x[0]) / (nValid - 1)
chi0 = source_x[0] # (float)
wchi3 = nValid * dchi3
chi3 = np.linspace(chi0, chi0 + wchi3, nValid, endpoint = False)
#*********************************************************
# Step 1-3:
# Cubic spline interpolation of data to transformed scale
# (source_x, source_y) => (chi3, eta3)
#*********************************************************
f3 = interpolate.interp1d(source_x, source_y, kind="cubic")
chi3[-1] = np.minimum(chi3[-1],source_x[-1])
eta3 = f3(chi3)
#********************************************
# Step 1-4:
# Extrapolation for marginal (padding) range
#********************************************
wchi3 = nDest * dchi3
chi3 = np.linspace(chi0, chi0 + wchi3, nDest, endpoint = False)
eta3 = np.resize(eta3, nDest)
cmn.pad_margin(eta3, index)
#***********************************************
# Step 1-5: Fourier transform of intensity data
# (eta3) => (ft_eta3)
#***********************************************
ft_eta3 = np.fft.fft(eta3)
# print("nDest = ",nDest)
# print("nFT = ",nFT)
#**********************************************************
# Step 1-6: Fourier transform of approximate model profile
# => (ft_deconv3), (ft_conv3)
#**********************************************************
dxi3=1.0/(dchi3*nDest)
xi3=np.linspace(0.0,nDest*dxi3,nDest,endpoint=False)
xi3[nDest_2:nDest] -= nDest * dxi3
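    # xi3 is the FFT frequency grid; for even nDest it coincides with
    # np.fft.fftfreq(nDest, d=dchi3).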
ft_deconv3=fFtDeconv(xi3)
ft_conv3 = np.abs(ft_deconv3)
#*****************************************************************
# Step 1-7: Fourier transform about artificial shift introduction
#*****************************************************************
ft_shift3 = np.exp(-2 * np.pi * 1.0j * xi3)
ft_inst3 = ft_deconv3 * ft_shift3 / ft_conv3
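    # Interpretation: dividing the data FT by ft_inst3 deconvolves the
    # asymmetric model, re-convolves its symmetrized modulus (keeping the
    # symmetric broadening, which suppresses noise amplification), and applies
    # a unit shift so that the centroid is unchanged at this stage; the
    # first-cumulant shift is corrected in Stage 2.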
#***********************************************
# Step 1-8: Deconvolution/convolution treatment
#***********************************************
ft_zeta3 = ft_eta3 / ft_inst3
#*************************************
# Step 1-9: Inverse Fourier transform
#*************************************
zeta3 = np.real(np.fft.ifft(ft_zeta3))
#******************************
# Step 1-10: Pick up intensity
#******************************
wchi3 = nDest * dchi3
chi3 = np.linspace(chi0, chi0 + wchi3, nDest, endpoint = False)
f3 = interpolate.interp1d(chi3, zeta3, kind="cubic")
dest3_y = f3(source_x)
#******************************
# Step 1-11: Rescale transform
#******************************
d3_intensity = dest3_y / source_corr
dest = source
dest[1] = d3_intensity
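    # NOTE: dest and source are bound to the same list object, so updating
    # dest[1] also updates source[1] as used by cmn.TreatErrors() below.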
#********************************************
# Step 1-12: Fourier treatment of error data
#********************************************
dest = cmn.TreatErrors(error_type, source, source_x, index, source_corr, ft_inst3)
#***************************************************
# Step 1-13: Pass "dest" as "source" for next stage
#***************************************************
source = dest
##########################################
# Stage 2: Correction of the 1st cumulant
##########################################
#*****************************
# Step 2-0: Parse Python list
#*****************************
s_angle = np.array(source[0])
s_intensity = np.array(source[1])
#***************************
# Step 2-1: Scale transform
#***************************
k1 = -1.0
dchi1_d2Theta = k1 / kappa1
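    # On the chi1 scale the 1st cumulant of the aberration is constant (= k1),
    # so the peak shift can be removed by one uniform shift in Fourier space.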
source_x = np.cumsum( dchi1_d2Theta )
source_x *= (s_angle[1] - s_angle[0])
source_corr = cmn.g_corr(s_angle / 2) # geometrical correction
source_corr /= dchi1_d2Theta
source_y = s_intensity * source_corr
#*********************************************
# Step 2-2: Preparation for Fourier treatment
#*********************************************
index = cmn.indices(source_x, margin)
nSource = index[0]
nValid = index[1]
i2 = index[2]
i3 = index[3]
nDest = index[4]
    nDest_2 = int(nDest/2)
dchi1 = (source_x[nSource-1] - source_x[0]) / (nValid - 1)
chi0 = source_x[0]
wchi1 = nValid * dchi1
chi1 = np.linspace(chi0, chi0 + wchi1, nValid, endpoint = False)
#**************************************
# Step 2-3: Cubic spline interpolation
#**************************************
f1 = interpolate.interp1d(source_x, source_y, kind="cubic", fill_value="extrapolate")
eta1 = f1(chi1)
#**********************************
# Step 2-4: Padding marginal range
#**********************************
wchi1 = nDest * dchi1
chi1 = np.linspace(chi0, chi0 + wchi1, nDest, endpoint = False)
eta1 = np.resize(eta1, nDest)
cmn.pad_margin(eta1, index)
#***********************************************
# Step 2-5: Fourier transform of intensity data
#***********************************************
ft_eta1 = np.fft.fft(eta1)
dxi1 = 1.0 / (dchi1 * nDest)
xi1 = np.linspace(0, nDest*dxi1, nDest, endpoint=False)
xi1[nDest_2:nDest] -= nDest * dxi1
#***********************************************
# Step 2-6: Fourier transform of shift function
#***********************************************
ft_shift1 = np.exp(2 * np.pi * 1.0j * xi1)
ft_inst1 = ft_shift1
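    # ft_shift1 is a pure phase factor; dividing by it translates the pattern
    # by one step on the chi1 scale, which cancels the first-cumulant shift
    # of the aberration on the 2Theta scale.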
#****************************
# Step 2-7: Shift correction
#****************************
ft_zeta1 = ft_eta1 / ft_inst1
#*************************************
# Step 2-8: Inverse Fourier transform
#*************************************
zeta1 = np.real(np.fft.ifft(ft_zeta1))
wchi1 = nDest * dchi1
chi1 = np.linspace(chi0, chi0 + wchi1, nDest, endpoint = False)
#*****************************
# Step 2-9: Pick up intensity
#*****************************
f1 = interpolate.interp1d(chi1, zeta1, kind="cubic")
dest_y = f1(source_x)
#******************************
# Step 2-10: Rescale transform
#******************************
d_intensity = dest_y / source_corr
dest = source
dest[1] = d_intensity
#*******************************************
    # Step 2-11: Fourier treatment of error data
#*******************************************
dest = cmn.TreatErrors(error_type, source, source_x, index, source_corr, ft_inst1)
return dest
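The module is designed to be driven by a separate script together with “common5.py” (imported above as cmn). The following minimal sketch illustrates the assumed call sequence; the file names, the cfg values, and the two-column data layout are hypothetical placeholders, not recommended settings.
# usage sketch (hypothetical driver; assumes common5.py is available)
import numpy as np
import trnspr2

# cfg = [muInv, gonioR, specT, specW, degDS,
#        alphaTrans, marginTrans, skipTrans, error_type]
# All values below are placeholders for illustration only.
cfg = [0.1, 150.0, 0.5, 20.0, 1.0, 1.0, 1.0, '0', '0']

# two-column powder pattern: 2Theta in [deg.] and intensity (hypothetical file)
s_angle, s_intensity = np.loadtxt('example.xy', unpack=True)
source = [s_angle, s_intensity]

kappas = trnspr2.calc_kappas(s_angle, cfg) # cumulants of the aberration
dest = trnspr2.treat(source, kappas, cfg)  # deconvolutional treatment
np.savetxt('example_trnspr2.xy', np.transpose([dest[0], dest[1]]))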