trnspr4.py

Implementation of a deconvolutional treatment of the sample-transparency aberration in XRD data collected from a specimen of finite thickness and finite width filled into a translucent sample holder.

Python code “trnspr4.py” is shown below.

Click (tap) the [Python] icon at the top-right edge of the code area to copy the code to the clipboard.

# -*- coding: utf-8 -*-
###########################################
# trnspr4.py 
# to treat Sample-Transparency Aberration
# by a two-step deconvolutional method
# originally coded by T. Ida, May 14, 2024
# modified by T. Ida, Jun. 11, 2024
###########################################
import numpy as np # import "numpy" modeule as "np"
from scipy import interpolate # import "interpolate" from "scipy"
from scipy import special # import "special" from "scipy"
import common5 as cmn # import "common5.py" as "cmn"

########################################################
# calc_kappas()
# Calculate Cumulants of Sample-Transparency Aberration
########################################################
def calc_kappas(degTwot, cfg):
    # degTwot: 2Theta in [deg.] (NumPy array)
    # cfg: configuration dict with keys
    #   'muInv','gonioR','specT','specW','degDS','muInv2',
    #   'nzTrans','nxTrans',...
    muInv = cfg['muInv']
    gonioR = cfg['gonioR']
    specT = cfg['specT']
    specW = cfg['specW']
    degDS = cfg['degDS']
    muInv2 = cfg['muInv2']
    nU = cfg['nzTrans']
    nX = cfg['nxTrans']
    t_R = specT/gonioR # (not used below)
    muR = gonioR/muInv # (not used below)
    nSize = degTwot.size
    B = gonioR * degDS * np.pi/180 # Beam width (float)
    # muInv: penetration depth in [mm], (float)
    # gonioR: goniometer radius in [mm], (float)
    # degDS: divergence slit open angle in [deg.], (float)
    # specT: thickness of sample in [mm], (float)
    # specW: width of sample in [mm], (float)
    theta = degTwot * np.pi/360 # theta in [rad.] (nSize,)
    cosT = np.cos(theta) # cos(theta) (nSize,)
    sinT = np.sin(theta) # sin(theta) (nSize,)
    tanT = sinT/cosT # tan(theta) (nSize,)
    Z_L = np.maximum(-specT,-0.5*B/cosT-0.5*specW/tanT)
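    # Z_L: lower (most negative) bound of the depth z, limited either by
    # the sample thickness specT or by the beam width B and sample width specW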
    # Z_U = 0 (sample surface), hence U_U = 1 below
    U_L = np.exp(2*Z_L/(muInv*sinT)) #(nSize,)
    U_U = np.ones(nSize) # (nSize,)
    #
    # Gauss-Legendre along Z, sampling locations & weights
    xGL_i,wGL_i,mu_i = special.roots_legendre(nU,mu=True)
    wGL_i /= mu_i
    # (nU,),(nU,),(float)
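    # roots_legendre(nU,mu=True) also returns mu_i = sum of the weights (= 2);
    # dividing by mu_i normalizes the weights to sum to 1, so np.dot(.,wGL_i)
    # gives the average over [-1,1]; the interval length is restored by the
    # factor (U_U - U_L) applied below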
    #
    u_i = np.outer(U_L,1-xGL_i)/2 # (nSize,nU)
    u_i += np.outer(U_U,1+xGL_i)/2 # (nSize,nU)
    sinT_i = np.resize(np.repeat(sinT,nU),(nSize,nU))
    cosT_i = np.resize(np.repeat(cosT,nU),(nSize,nU))
    tanT_i = np.resize(np.repeat(tanT,nU),(nSize,nU))
    z_i = muInv*sinT_i*np.log(u_i)/2 # (nSize,nU)
    #
    # XL_i: horizontal lower limit
    XL_i = np.maximum(-specW/2,-z_i/tanT_i-B/(2*sinT_i))
    # XU_i: horizontal upper limit
    XU_i = np.minimum(specW/2,-z_i/tanT_i+B/(2*sinT_i))
    x_i = 2*z_i*cosT_i/gonioR # (nSize,nU)
    g_i = (XU_i-XL_i)/u_i # (nSize,nU)
    g0_i = g_i
    g1_i = x_i * g0_i
    g2_i = x_i * g1_i
    g3_i = x_i * g2_i
    g4_i = x_i * g3_i
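    # x_i: angular deviation 2*z*cos(theta)/gonioR in [rad.] of a ray
    # diffracted at depth z; g0_i..g4_i: integrands of its 0th..4th moments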
    #
    # Gauss-Legendre along X
    xGL_j,wGL_j,mu_j = special.roots_legendre(nX,mu=True)
    wGL_j /= mu_j
    # (nX,),(nX,),(float)
    #
    x_ij = np.outer(XL_i,1-xGL_j)/2 # np.outer flattens: (nSize*nU,nX)
    x_ij += np.outer(XU_i,1+xGL_j)/2 # (nSize*nU,nX)
    x_ij = np.resize(x_ij,(nSize,nU,nX)) # reshape to (nSize,nU,nX)
    W_ij = np.full((nSize,nU,nX),specW)
    z_ij = np.resize(np.repeat(z_i,nX),(nSize,nU,nX))
    sinT_ij = np.resize(np.repeat(sinT_i,nX),(nSize,nU,nX))
    cosT_ij = np.resize(np.repeat(cosT_i,nX),(nSize,nU,nX))
    tanT_ij = np.resize(np.repeat(tanT_i,nX),(nSize,nU,nX))
    xI_ij = x_ij + z_ij/tanT_ij # incident point
    l_SI_ij = np.where(-W_ij/2<xI_ij,-z_ij/sinT_ij,(W_ij/2+x_ij)/cosT_ij)
    l_HI_ij = np.where(-W_ij/2<xI_ij,0,-(W_ij/2+xI_ij)/cosT_ij)
    xE_ij = x_ij - z_ij/tanT_ij # emitting point
    l_SE_ij = np.where(xE_ij<W_ij/2,-z_ij/sinT_ij,(W_ij/2-x_ij)/cosT_ij)
    l_HE_ij = np.where(xE_ij<W_ij/2,0,(xE_ij-W_ij/2)/cosT_ij)
    h_ij = (l_SI_ij+l_SE_ij)/muInv
    h_ij += (l_HI_ij+l_HE_ij)/muInv2
    h_ij = np.exp(-h_ij) # (nSize,nU,nX)
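    # h_ij: attenuation factor; l_SI/l_SE are the incident/emitted path
    # lengths inside the sample (penetration depth muInv) and l_HI/l_HE
    # those inside the holder wall (penetration depth muInv2)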
    # Gauss-Legendre along X
    h_i = np.dot(h_ij,wGL_j) # (nSize,nU)
    # Gauss-Legendre along Z
    f = (U_U - U_L) * sinT / B # normalization factor
    s0 = f * np.dot(g0_i*h_i,wGL_i) # (nSize,)
    s1 = f * np.dot(g1_i*h_i,wGL_i)
    s2 = f * np.dot(g2_i*h_i,wGL_i)
    s3 = f * np.dot(g3_i*h_i,wGL_i)
    s4 = f * np.dot(g4_i*h_i,wGL_i)
    #
    # print('s0 = ',s0)
    m1 = s1 / s0
    m2 = s2 / s0
    m3 = s3 / s0
    m4 = s4 / s0
    # Calculate Cumulants up to 4th order
    kappa1 = m1
    kappa2 = m2 - m1**2
    kappa3 = m3 - 3*m2*m1 + 2*m1**3
    kappa4 = m4 - 4*m3*m1 - 3*m2**2 + 12*m2*m1**2 - 6*m1**4
    # Correct s0 where the beam footprint B/sin(theta) exceeds the sample width
    s0 = np.where(B/sinT < specW, s0, s0*B/(specW*sinT)) 
    # Convert to Reduced Cumulants in [deg.]
    kappa1 = 180.0/np.pi*kappa1
    kappa2 = 180.0/np.pi*np.sqrt(kappa2)
    kappa3 = 180.0/np.pi*np.sign(kappa3)*np.abs(kappa3)**(1/3)
    kappa4 = 180.0/np.pi*np.sign(kappa4)*np.abs(kappa4)**(1/4)
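    # (signed N-th roots give every reduced cumulant the dimension of an angle)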
    kappas = np.array([s0, kappa1,kappa2,kappa3,kappa4])
    return kappas
        # returns kappas: (NumPy array, shape (5,nSize))
        # [s0,kappa1,kappa2,kappa3,kappa4]

############################
# Component model functions
############################
# fTexp(), Truncated Exponential Function
def fTexp(x):
    return np.where(x<0.0,np.exp(x),0.0)
# fFtTexp(), Fourier Transform of fTexp()
def fFtTexp(xi):
    return 1 / (1 + 2*np.pi*1j*xi)
# fRect(), Rectangular Function
def fRect(x):
    return np.where((-1.0<x)&(x<0.0),1.0,0.0)
# fFtRect(), Fourier Transform of fRect()
def fFtRect(xi):
    farZero = np.abs(xi) > 1.0E-6
    numer=1.0-np.exp(-2*np.pi*1j*xi) # numerator
    denom=2*np.pi*1j*xi # denominator
    ones=np.ones_like(numer)
    np.seterr(invalid='ignore')
    ans = np.divide(numer,denom,out=ones,where=farZero)
    np.seterr(invalid='warn') # restore the default 'warn' state
    return ans
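
# Note: fFtTexp() and fFtRect() are the analytic transforms of fTexp() and
# fRect() under the exp(+2*pi*1j*xi*x) sign convention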

##############################################
# "trnspr5.treat"
# TREATMENT OF SAMPLE TRANSPARENCY ABERRATION
##############################################
def treat(sign,source,kappas,cfg):
    # sign: -1 or 1
    # (sign==-1: exponential)(sign==1: rectangular)
    # source: Source Data (Python List)
    #    [s_angle, s_intensity],
    #    [s_angle, s_intensity, s_error1], or
    #    [s_angle, s_intensity, s_error1, s_error2]
    # cfg: configuration dict; this function uses the keys
    #   'marginTrans','skipTrans','error_type'
    debug = 0
    if (debug == 1):
        if (sign==-1):
            print("*** Treatment of Sample Transparency Aberration (exponential) ***")
        else:
            print("*** Treatment of Sample Transparency Aberration (rectangular) ***")
    #******************************
    # Configuration from "dct.cfg"
    #******************************
    marginTrans = cfg['marginTrans']
    modeTrans = 3 # hard-coded: select the real-space model branch in Step 6
    skipTrans = cfg['skipTrans']
    error_type = cfg['error_type']

    #***************************************************
    # Step 0: Parse Python List "source" to numpy.array
    #***************************************************
    s_angle = np.array(source[0])
    s_int = np.array(source[1])
    if ((error_type == '1') or (error_type == '3')):
        s_error1 = np.array(source[2])
    if (error_type == '2'):
        s_error2 = np.array(source[2])
    elif (error_type == '3'):
        s_error2 = np.array(source[3])
    if (debug == 1):
        print("s_int[0] = ",s_int[0])
        print("s_int[-1] = ",s_int[-1])

    #*****************************************************
    # Step 1: Scale Transform for Exponential and
    # Rectangular Components
    # s_angle (2Theta in deg.) => source_chi
    # s_int => source_eta
    #*****************************************************
    nData = s_angle.size # number of data points
    kappa0 = kappas[0]
    kappa1 = kappas[1]
    kappa3 = np.sign(kappas[3])*np.abs(kappas[3])**3
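    # (kappas[3] is the reduced 3rd cumulant in [deg.]; cubing restores kappa3)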
    # 1st & 3rd Cumulants of Truncated Exponential Component
    k1Texp = -1.0
    k3Texp = -2.0
    # 1st & 3rd Cumulants of Rectangular Component
    k1Rect = -0.5
    k3Rect = 0.0
    dchi_d2T_exp = np.power(k3Texp/kappa3, 1.0/3)
    margin = marginTrans
    iStart = 0
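    # Scale transform: dchi_d2T is the local rate d(chi)/d(2Theta), chosen so
    # that the model component keeps a fixed shape on the chi scale;
    # source_chi is its running integral over the 2Theta grid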
    if (sign==-1): # (truncated exponential)
        if (debug==1):
            print('margin = ',margin)
        dchi_d2T = dchi_d2T_exp
        source_chi = np.cumsum(dchi_d2T)
        source_chi -= dchi_d2T[0] # shift so that source_chi[0] = 0
        source_chi *= (s_angle[nData-1] - s_angle[0])/(nData-1)
    else: # (rectangular)
        denom = kappa1 - k1Texp/dchi_d2T_exp # (maybe almost 0)
        vEps = 1.0E-6 # (small number)
        vInf = 1.0E+7 # (large number)
        k1Rect = -0.5
        dchi_d2T = np.minimum(vInf,np.maximum(vEps,k1Rect/denom))
        source_chi = np.cumsum(dchi_d2T)
        source_chi *= (s_angle[nData-1] - s_angle[0])/(nData-1)
    if (debug==1):
        print("source_chi[0] = ",source_chi[0])
        print("source_chi[-1] = ",source_chi[-1],"(size: ",source_chi.size,")")
    source_corr = cmn.g_corr(s_angle/2)/dchi_d2T
        # cmn.g_corr(): geometrical correction
    source_eta = s_int * source_corr
    if (sign==-1):
        source_corr2 = 1 / kappa0
        source_eta *= source_corr2
        if ((error_type == '1') or (error_type == '2')):
            source[2] *= source_corr2
        elif (error_type == '3'):
            source[3] *= source_corr2
    if (debug==1):
        print("max(s_int[0:500]) is at angle = ",s_angle[np.argmax(s_int[0:500])])
        print("d(chi)/d(2T)[0] = ",dchi_d2T[0])
        print("d(chi)/d(2T)[-1] = ",dchi_d2T[-1],"(size: ",dchi_d2T.size,")")
        print("source_chi[0] = ",source_chi[0])
        print("source_chi[-1] = ",source_chi[-1],"(size: ",source_chi.size,")")
        print("source_corr[0] = ",source_corr[0])
        print("source_corr[-1] = ",source_corr[-1],"(size: ",source_corr.size,")")
        print("source_eta[0] = ",source_eta[0])
        print("source_eta[-1] = ",source_eta[-1],"(size: ",source_eta.size,")")
    #*******************************************
    # Step 2: Preparation for Fourier treatment
    #*******************************************
    # print('source_chi = ',source_chi)
    index = cmn.indices(source_chi, margin)
    nSource = index[0] # number of source data (=nData)
    nValid = index[1] # index of last valid data
    i1 = index[2] # index for first division point in marginal range
    i2 = index[3] # index for second division point in marginal range
    nDest = index[4] # total number of equidistant data
    iMin = index[5] # index of minimum separation in source data
    dchi = index[6] # interval of equidistant data
    # dx_min = index[7] # minimum separation found in source data (not used)
    if (debug==1):
        print("margin = ",margin)
        print("index = ",index)
        print("dchi = ",dchi)
    chi=np.linspace(start=0,stop=nValid*dchi,num=nValid,endpoint=False)
    #*********************************************************
    # Step 3:
    # Cubic spline interpolation of data to transformed scale
    # (source_x, source_y) => (chi, eta)
    #*********************************************************
    f3 = interpolate.interp1d(source_chi,source_eta,kind="cubic",fill_value='extrapolate')
    chi[nValid-1]=np.minimum(chi[nValid-1],source_chi[nData-1])
    eta = f3(chi)
    if (debug == 1):
        print("eta[0] = ",eta[0],"eta[1] = ",eta[1])
        print("eta[-1] (before padding) = ",eta[-1],"(size=",eta.size,")")
    #**********************************************************
    # Step 4:
    # Redimension & Extrapolation for marginal (padding) range
    #**********************************************************
    chi = np.linspace(0, nDest*dchi, nDest, endpoint=False)
    eta = np.resize(eta, nDest)
    cmn.pad_margin(eta, index) # => "common5.py"
    if (debug == 1):
        print("chi[0]=",chi[0],"chi[-1]=",chi[-1])
        print("eta[-1] (after padding) = ",eta[-1],"(size=",eta.size,")")
    # print("xi = ",xi,"(",xi.size,")")
    # print("eta = ",eta,"(",eta.size,")")
    # print("index = ",index)
    #***********************************************
    # Step 5: Fourier transform of intensity data
    # (eta) => (ft_eta)
    #***********************************************
    ft_eta = np.fft.rfft(eta)
    nFT = ft_eta.size
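    # (np.fft.rfft of nDest real points returns nFT = nDest//2 + 1 terms)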
    if (debug == 1):
        print("ft_eta[0] = ",ft_eta[0],", ft_eta[1] = ",ft_eta[1])
        print("ft_eta[-1] = ",ft_eta[-1],"(size: ",nFT,")")
    #********************************************************
    # Step 6: Fourier transform of approximate model profile
    # => (ft_deconv), (ft_conv)
    #********************************************************
    if (modeTrans == 0): # analytic Fourier-transform models
        dxi=1.0/(nDest*dchi)
        xi=np.linspace(0,nFT*dxi,nFT,endpoint=False)
        if (sign==-1):
            ft_deconv = fFtTexp(xi)
            ft_conv = np.abs(ft_deconv)
        elif (sign==1):
            ft_deconv = fFtRect(xi)
            ft_conv = np.abs(ft_deconv)
    else: # real-space models (modeTrans != 0)
        chi = np.linspace(-nDest*dchi, 0, nDest, endpoint=False)
        if (sign==-1): # (exponential)
            deconv = fTexp(chi) # truncated exponential model (defined above)
        else: # (rectangular)
            deconv = fRect(chi)
        deconv[0] = 1.0/dchi-np.sum(deconv[1:nDest]) # normalization
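        # (deconv[0] is set so that np.sum(deconv)*dchi == 1, a unit-area model)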
        ft_deconv = np.fft.rfft(deconv) # Fourier transform
        ft_conv = np.array(np.abs(ft_deconv))
    if (debug==1):
        print("modeTrans = ",modeTrans)
        print("ft_deconv[0] = ",ft_deconv[0],", ft_deconv[1] = ",ft_deconv[1])
        print("ft_deconv[-1] = ",ft_deconv[-1],"(size:",ft_deconv.size,")")
        print("ft_conv[0] = ",ft_conv[0],", ft_conv[1] = ",ft_conv[1])
        print("ft_conv[-1] = ",ft_conv[-1],"(size:",ft_conv.size,")")
    #***************************************************
    # Step 7: Fourier transformed instrumental function
    #***************************************************
    # np.seterr(invalid='ignore')
    np.seterr(invalid='warn')
    ft_inst = ft_deconv / ft_conv # deconvolutional
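    # ft_conv = |ft_deconv|, so ft_inst has unit modulus: dividing by it in
    # Step 8 deconvolves only the phase (asymmetry) of the model while
    # leaving its modulus (symmetric broadening) untouched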
    if (debug==1):
        print("ft_inst[0] = ",ft_inst[0],", ft_inst[1] = ",ft_inst[1])
        print("ft_inst[-1] = ",ft_inst[-1],"(size:",ft_inst.size,")")
    #***********************************************
    # Step 8: Deconvolution/convolution treatment
    #***********************************************
    ft_zeta = ft_eta / ft_inst # deconvolutional
    if (debug==1):
        print("ft_zeta[0] = ",ft_zeta[0],", ft_zeta[1] = ",ft_zeta[1])
        print("ft_zeta[-1] = ",ft_zeta[-1])
    np.seterr(invalid='warn')
    #*************************************
    # Step 9: Inverse Fourier transform
    #*************************************
    zeta = np.real(np.fft.irfft(ft_zeta)) # inverse Fourier transform
    if (debug==1):
        print("zeta[0] = ",zeta[0])
        print("zeta[-1] = ",zeta[-1],"(size: ",zeta.size,")")
    # print("ft_deconv = ",ft_deconv,"(",ft_deconv.size,")")
    # print("ft_conv = ",ft_conv)
    # print("ft_conv/ft_deconv =",ft_conv/ft_deconv)
    # print("ft_eta = ",ft_eta)
    #******************************
    # Step 10: Pick up intensity
    #******************************
    chi = np.linspace(0, nDest*dchi, nDest, endpoint = False)
    f3 = interpolate.interp1d(chi, zeta, kind="cubic")
    dest_eta = f3(source_chi)
    #******************************
    # Step 11: Rescale transform
    # dest_y => d_int
    #******************************
    d_int = dest_eta / source_corr
    d_int = np.maximum(0,d_int)
    dest = source # same list object: source[1] is updated below, too
    dest[1] = d_int
    if (debug==1):
        print("d_int[0] = ",d_int[0],"d_int[1] = ",d_int[1])
        print("d_int[-1] = ",d_int[-1],"(size:",d_int.size,")")
        print("max(d_int[0:500]) is at angle = ",s_angle[np.argmax(d_int[0:500])])
        print("")
    #**********************************************************
    # Step 12: Fourier treatment of error data (not validated)
    #**********************************************************
    dest = cmn.TreatErrors(error_type, source, source_chi, index, source_corr, ft_inst)
    
    return dest
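
For reference, the sketch below shows one possible way to call the two functions above. It is only a minimal example, not part of “trnspr4.py”: the configuration values and the input pattern are hypothetical placeholders (only the dictionary keys are taken from the code), “common5.py” must be importable, and it is assumed that cmn.TreatErrors() returns the same [angle, intensity, error] list structure as the input.

# usage sketch (hypothetical example, not part of "trnspr4.py")
import numpy as np
import trnspr4

# Hypothetical configuration; the key names are those read by "trnspr4.py",
# the values are placeholders to be replaced by the actual settings.
cfg = {
    'muInv': 0.1,        # penetration depth of the sample in [mm]
    'muInv2': 0.05,      # penetration depth of the holder in [mm]
    'gonioR': 150.0,     # goniometer radius in [mm]
    'specT': 0.5,        # sample thickness in [mm]
    'specW': 20.0,       # sample width in [mm]
    'degDS': 1.0,        # divergence slit open angle in [deg.]
    'nzTrans': 16,       # Gauss-Legendre sampling points along z
    'nxTrans': 16,       # Gauss-Legendre sampling points along x
    'marginTrans': 1.0,  # margin for the Fourier treatment
    'skipTrans': '0',    # skip flag (placeholder)
    'error_type': '1',   # error-data type: [s_angle, s_int, s_error1]
}

# Hypothetical input pattern: 2Theta in [deg.], intensity and its error
s_angle = np.linspace(10.0, 140.0, 6501)
s_int = np.random.poisson(1000.0, s_angle.size).astype(float)
source = [s_angle, s_int, np.sqrt(s_int)]

# Cumulants of the aberration on the measured 2Theta grid
kappas = trnspr4.calc_kappas(s_angle, cfg)
# Two-step treatment: exponential (sign=-1), then rectangular (sign=1)
dest = trnspr4.treat(-1, source, kappas, cfg)
dest = trnspr4.treat(1, dest, kappas, cfg)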