axial5.py – treatment of axial-divergence aberration

The Python code “axial5.py” for the treatment of the axial-divergence aberration in Bragg-Brentano geometry is shown below.

Click (tap) the [Python] icon at the top-right edge of the code area to copy the code to the clipboard.

# -*- coding: utf-8 -*-
# axial5.py
# T. Ida, Sep. 8, 2021
# Modified by T. Ida, September 21, 2023 (ver. 5.3)
# Modified by T. Ida, December 29, 2023 (ver. 5.4)

import numpy as np # import "numpy" library
from scipy import interpolate # import "interpolate" module from "scipy"
from scipy import special # import "special" module from "scipy"
import os.path # import "os.path" module
import configparser # import "configparser" library
import common5 as cmn # import "common5.py" as "cmn"

########################################################
# calc_kappas()
# returns integrated intensity and Reduced Cumulants of 
# Axial-Divergence Aberration
# based on Exact Formula & Numerical Calculation
# Output units are [0: none; 1: deg.; 2: deg.; 3: deg.; 4: deg.]
########################################################
def calc_kappas(degTwot, degAxial, degAxial2):
    # degTwot: 2Theta in [deg.] (NumPy float array)
    # degAxial: Soller slit angle for incident beam (float number)
    # degAxial2: Soller slit angle for diffracted beam (float number)

    # 10-point Gauss-Legendre abscissas & weights (mapped onto the interval [0,1])
    xGL=np.array([0.013047,0.067468,0.160295,0.283302,0.425563,\
    0.574437,0.716698,0.839705,0.932532,0.986953])
    wGL=np.array([0.033336,0.074726,0.109543,0.134633,0.147762,\
    0.147762,0.134633,0.109543,0.074726,0.033336])
    pi_180 = np.pi / 180.0
    twot = degTwot * pi_180 # 2Theta (NumPy float array)
    cos2T = np.cos(twot) # cos(2Theta) (NumPy float array)
    nSize = degTwot.size # (integer number)
    nOnes = np.ones(nSize)
    nSamp = 10 # Sampling numbers
    nSamp2 = nSamp**2
    phi = degAxial * pi_180
    phi2 = degAxial2 * pi_180
    a = -phi+2*phi*xGL # (NumPy array) (nSamp)
    cos_a = np.cos(a) # (NumPy array) (nSamp)
    sin_a = np.sin(a) # (NumPy array) (nSamp)
    b = -phi2+phi2*xGL # (NumPy array) (nSamp)
    cos_b = np.cos(b) # (NumPy array) (nSamp)
    sin_b = np.sin(b) # (NumPy array) (nSamp)
    cos_a_cos_b = np.outer(cos_a, cos_b).reshape(nSamp2,)
    sin_a_sin_b = np.outer(sin_a, sin_b).reshape(nSamp2,)
    term1 = np.outer(cos2T, cos_a_cos_b) # (NumPy array) (nSamp*nSamp)
    term2 = np.outer(nOnes, sin_a_sin_b) # (NumPy array) (nSamp*nSamp)
    cosTerm = term1 - term2
    twot = np.repeat(twot,nSamp2).reshape(nSize,nSamp2)
    del2T = twot - np.arccos(cosTerm) # (nSize,nSamp*nSamp)
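    # The apparent diffraction angle 2Theta' for axial offsets a (incident beam)
    # and b (diffracted beam) satisfies
    # cos(2Theta') = cos(2Theta)*cos(a)*cos(b) - sin(a)*sin(b),
    # so del2T = 2Theta - arccos(...) is the angular deviation for each (a, b) pair.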
    fa = (1-np.fabs(a)/phi)*wGL # weight (nSamp)
    fb = (1-np.fabs(b)/phi2)*wGL # weight (nSamp)
    fab = np.outer(fa, fb).reshape(nSamp2,) * 4 # (nSamp*nSamp)
    weight = np.outer(nOnes,fab).reshape(nSize,nSamp2)
    # Calculate power average up to 4th order...
    s0 = weight.sum(axis=1)
    s1 = (del2T * weight).sum(axis=1)
    s2 = (del2T**2 * weight).sum(axis=1)
    s3 = (del2T**3 * weight).sum(axis=1)
    s4 = (del2T**4 * weight).sum(axis=1)
    # Calculate Cumulants up to 4th order ...
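    # Standard moment-to-cumulant relations, with mn = sn/s0:
    # kappa1 = m1, kappa2 = m2 - m1**2, kappa3 = m3 - 3*m2*m1 + 2*m1**3,
    # kappa4 = m4 - 4*m3*m1 - 3*m2**2 + 12*m2*m1**2 - 6*m1**4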
    cum1 = s1/s0
    cum2 = s2/s0-(s1/s0)**2
    cum3 = s3/s0-3*s2*s1/s0**2+2*(s1/s0)**3
    cum4 = s4/s0-4*s3*s1/s0**2-3*(s2/s0)**2
    cum4 += 12*s2*s1**2/s0**3-6*(s1/s0)**4
    cum1 = cum1/pi_180
    cum2 = np.sqrt(cum2)/pi_180
    cum3 = np.sign(cum3)*np.abs(cum3)**(1/3)/pi_180
    cum4 = np.sign(cum4)*np.abs(cum4)**(1/4)/pi_180
    cums=np.array([s0,cum1,cum2,cum3,cum4])
    return(cums)

###################################################################
# Define Fourier Transform of Peak Profile Model for Deconvolution
# for sign = -1 or +1
###################################################################
def fFtDeconv(alpha, sign, xi):
    # alpha # (float)
    # sign = -1, 1
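    # Analytic Fourier transform (kernel exp(-2*pi*1j*xi*x)) of the one-sided
    # profile defined in fDeconv() below:
    # integral over x > 0 of x**(alpha-1)*exp(-x)/Gamma(alpha)*exp(-2*pi*1j*xi*x)
    # equals (1 + 2*pi*1j*xi)**(-alpha); sign = -1 mirrors the profile to x < 0,
    # giving (1 - 2*pi*1j*xi)**(-alpha).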
    ans = np.power(1 + sign * 2*np.pi*1j * xi,-alpha)
    return ans
###############################################################
# Define Peak Profile Model for Deconvolution for sign == -1/1 
################################################################
def fDeconv(alpha, sign, x):
    # alpha # (float)
    # sign = -1, 1
    # x: NumPy array
    def fFunc(gam_alpha,abs_x):
        np.seterr(divide="ignore")
        ans = abs_x**(alpha-1)*np.exp(-abs_x)/gam_alpha
        np.seterr(divide="warn")
        return ans
    gam_alpha = special.gamma(alpha) # Complete Gamma function
    ans = np.where(x*sign>0,fFunc(gam_alpha,np.abs(x)),0)
    return ans
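# Note: the cumulants of this one-sided gamma-type profile are
# kappa1 = sign*alpha, kappa2 = alpha, kappa3 = sign*2*alpha,
# which is why k1 = alphaAxial and k3 = 2*alphaAxial are used in treat() below.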

######################################################
# "axial5.treat"
# TREATMENT OF AXIAL DIVERGENCE ABERRATION, MAIN BODY
######################################################
def treat(sign, source, kappas, cfg):
    # sign = +1: higher-angle / sign = -1: lower-angle
    # source: Source Data (Python List)
    #    [s_angle, s_intensity],
    #    [s_angle, s_intensity, s_error1],
    #    or [s_angle, s_intensity, s_error1, s_error2]
    # kappas: cumulants of axial divergence aberrations
    # cfg: [alphaAxial, marginAxial, modeAxial, skipAxial, error_type]
    
    #***********************************
    # Configuration from "dct.cfg"
    #***********************************
    alphaAxial=cfg[0]
    marginAxial=cfg[1]
    modeAxial=cfg[2]
    skipAxial=cfg[3]
    error_type=cfg[4]

    #*****************************************************
    # Assign Items in Python List "source" to numpy.array
    #*****************************************************
    s_angle = np.array(source[0]) # 0th item is array of angles
    s_intensity = np.array(source[1]) # 1st item is array of intensity
    d_intensity = np.array(s_intensity)
    nData = s_angle.size
    cum1 = kappas[1]
    cum3 = np.sign(kappas[3])*np.abs(kappas[3])**3
    k1 = alphaAxial # 1st-order cumulant of model function
    k3 = 2*alphaAxial # 3rd-order cumulant of model function
    i90 = np.argmin(np.fabs(s_angle-90))
    #----------------------------------------------------
    # 1st-order function for d(chi)/d(2Theta) (dchi_d2T) 
    #----------------------------------------------------
    def f1(iSign,cum1,cum3,k1,k3):
        d = k1*cum3/(3*k3*cum1)-cum1**2/(12*k1**2)
        return 1.0/( iSign*cum1/(2*k1) + np.sqrt(d) )
    #--------------------------------
    # Interpolation or extrapolation 
    #--------------------------------
    def polate(x,y,i,i1,i2):
        # x,y: (NumPy array)
        # i,i1,i2: (integer)
        return ((x[i2]-x[i])*y[i1]+(x[i]-x[i1])*y[i2])/(x[i2]-x[i1])
    #--------------------
    # Calculate dchi_d2T
    #--------------------
    dchi_d2T=np.empty(nData)
    if (i90==0):
        dchi_d2T[1:nData]=f1(sign,cum1[1:nData],cum3[1:nData],k1,k3)
        dchi_d2T[0]=polate(s_angle,dchi_d2T,0,1,2)
    elif (i90<nData-1):
        dchi_d2T[0:i90]=f1(sign,cum1[0:i90],cum3[0:i90],k1,k3)
        dchi_d2T[i90+1:nData] = f1(sign,cum1[i90+1:nData],cum3[i90+1:nData],k1,k3)
        dchi_d2T[i90]=polate(s_angle,dchi_d2T,i90,i90-1,i90+1)
    else: # if (i90==nData-1):
        dchi_d2T[0:nData-1] = f1(sign,cum1[0:nData-1],cum3[0:nData-1],k1,k3)
        dchi_d2T[nData-1]=polate(s_angle,dchi_d2T,nData-1,nData-3,nData-2)

    # if (sign==-1):
    #     print("dchi_d2T(axial-) = ",dchi_d2T)
    # else:
    #     print("dchi_d2T(axial+) = ",dchi_d2T)

    #*******************************************************
    # Step 1: Scale Transform from "2Theta" (equidistant)
    # to "chi" (non-equidistant)
    # s_angle (2Theta in deg.) => source_chi (on chi-scale)
    # s_intensity => source_eta (ordinate on chi-scale)
    #*******************************************************
    # Cumulative sum, start from zero
    source_chi = np.cumsum(dchi_d2T) - dchi_d2T[0]
    source_chi *= (s_angle[nData-1]-s_angle[0])/(nData-1)
    source_corr = cmn.g_corr(s_angle/2)/dchi_d2T # Correction Factor
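    # Dividing by the Jacobian dchi_d2T makes the transform area-preserving:
    # the integral of source_eta over chi equals the integral of
    # s_intensity*g_corr over 2Theta.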
    # Corrected and Transformed Intensity
    source_eta = s_intensity * source_corr # ordinate 
    # Error data are also corrected
    if ((error_type == '1') or (error_type == '2')):
        source[2] = source[2] * source_corr
    elif (error_type == '3'):
        source[3] = source[3] * source_corr

    #******************************************************************
    # Step 2: Preparation for Fourier treatment
    # Determine Marginal (Padding) Range, Total Points & Step Interval
    #******************************************************************
    margin = marginAxial
    index = cmn.indices(source_chi, margin)
        # indices() method is in "common5.py"
    nSource = index[0] # 0th index, Number of source data
    i1 = index[1] # 1st index, Number of interpolated data
    i2 = index[2] # 2nd index, 1/3-point of margin
    i3 = index[3] # 3rd index, 2/3-point of margin
    nDest = index[4] # 4th index, total number of points
    iMin = index[5] # minimum separation point in scaled data
    dchi = index[6] # step interval
    # Prepare Arrays for Interpolated Data in Valid Range
    nValid = i1
    chi0 = source_chi[0]
    chiV = source_chi[nSource-1]
    chi = np.linspace(chi0,chiV,nValid,endpoint = False)

    #*********************************************************
    # Step 3: 
    # Cubic spline interpolation of Data to Transformed Scale
    # (source_chi, source_eta) => (chi, eta)
    #*********************************************************
    # Cubic spline interpolation
    f3 = interpolate.interp1d(source_chi, source_eta, kind="cubic")
    eta = f3(chi)

    #********************************************
    # Step 4: 
    # Extrapolation for marginal (padding) range
    # (chi, eta) => (chi, eta)
    #********************************************
    # Fill Marginal Area with Values
    wchi = nDest * dchi
    chi = np.linspace(chi0,chi0+nDest*dchi,num=nDest,endpoint=False)
    eta = np.resize(eta, nDest)
    cmn.pad_margin(eta, index)
        # pad_margin() method is in "common5.py"

    #*********************************************
    # Step 5: Fourier Transform of Intensity Data
    # (eta) => (ft_eta)
    #*********************************************
    # Fourier transform of intensity data
    ft_eta = np.fft.rfft(eta)
    nFT = ft_eta.size
    #*************************************************************
    # Step 6: Fourier Transform of Axial-Divergence Model Profile
    # Function To Be Deconvolved (Axial-Divergence Model)
    # => (ft_deconv), (ft_conv)
    #*************************************************************
    if (modeAxial=='0'): # Reciprocal expression
        dxi=1.0/(dchi*nDest) # Step interval of Fourier transform
        # Abscissa of Fourier transform
        xi = np.linspace(start=0,stop=nFT*dxi,num=nFT,endpoint=False)
        # Fourier Transform of Function to be Deconvolved (+/- Axial)
        ft_deconv = fFtDeconv(alphaAxial, sign, xi)
        # Fourier Transform of Function to be Convolved
        ft_conv = np.abs(ft_deconv)
    elif (modeAxial=='1'): # Real expression
        if (sign == -1): # lower-angle component
            chi = np.linspace(-nDest*dchi,0,nDest,endpoint=False)
        else: # upper-angle component
            chi = np.linspace(0,nDest*dchi,nDest,endpoint=False)
        deconv = fDeconv(alphaAxial,sign,chi)
        deconv[0] = 1.0/dchi - np.sum(deconv[1:nDest])
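        # Put the residual weight into the first bin so that the discrete sum
        # satisfies dchi*np.sum(deconv) = 1 (unit area for the sampled profile).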
        ft_deconv = np.fft.rfft(deconv) # FFT for real array
        # Fourier transform of function to be convolved
        ft_conv = np.abs(ft_deconv) # (complex absolute value)
    # Fourier transform of instrumental function
    ft_inst = ft_deconv / ft_conv
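    # Since ft_conv = |ft_deconv|, ft_inst has unit modulus (a pure phase factor);
    # dividing by it in Step 7 deconvolves the model profile while convolving
    # the symmetrized profile that has the same absolute Fourier transform.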

    #**********************************************************
    # Step 7: Core of the Deconvolutional Treatment,
    # Deconvolution/Convolution Applied to the Fourier Transform
    # (ft_eta)/(ft_inst) => (ft_eta)
    #**********************************************************
    # Deconvolution/Convolution in Fourier transform
    ft_eta = ft_eta / ft_inst

    #***********************************
    # Step 8: Inverse Fourier Transform
    # (ft_eta) => (zeta)
    #***********************************
    # Inverse Fourier transform of DCT data
    zeta = np.real(np.fft.irfft(ft_eta)) # inverse FFT for real destination

    #***********************************************
    # Step 9: Pick up Intensity from DCT data
    # on "chi" scale by cubic spline interpolation
    # (chi, zeta) => (source_chi, dest_zeta)
    #***********************************************
    # Reset equidistant abscissa, chi
    chi = np.linspace(chi0,chi0+nDest*dchi,nDest,endpoint = False)
    # Cubic spline interpolation, (chi,zeta) => (source_chi, dest_zeta)
    f3 = interpolate.interp1d(chi,zeta,kind="cubic")
    dest_zeta = f3(source_chi)

    #**********************************************
    # Step 10: Rescale transform
    # (source_chi, dest_zeta) => (s_angle, d_intensity)
    #**********************************************
    # Rescale transform
    d_intensity = dest_zeta / source_corr # ordinate values
    d_intensity = np.maximum(0,d_intensity)
    dest = source
    dest[1] = d_intensity
 
    # if (sign==-1):
    #    print("*** Treatment of Axial Divergence Aberration ***")
    # print("s_intensity = ",s_intensity)
    # print("d_intensity = ",d_intensity)
   
    #***********************************************************
    # Step 11: Fourier Treatment of Error Data in axial5.treat()
    #***********************************************************
    dest = cmn.TreatErrors(error_type, source, source_chi, index, source_corr, ft_inst)

    return dest
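
The following is a minimal usage sketch (it is not part of “axial5.py”); it only illustrates how calc_kappas() might be called to inspect the aberration cumulants. The 2Theta grid and the 2.5-deg. Soller-slit angles are arbitrary example values, and importing “axial5” assumes that “common5.py” is available in the same directory.

# example_axial5.py -- usage sketch, not part of axial5.py
import numpy as np # import "numpy" library
import axial5 # importing "axial5" also imports "common5.py", which must be present

degTwot = np.linspace(20.0, 140.0, 13) # example 2Theta grid in [deg.]
degAxial = 2.5 # example Soller-slit angle for the incident beam in [deg.]
degAxial2 = 2.5 # example Soller-slit angle for the diffracted beam in [deg.]
kappas = axial5.calc_kappas(degTwot, degAxial, degAxial2)
# kappas[0]: integrated intensity, kappas[1]..kappas[4]: reduced cumulants in [deg.]
for label, values in zip(["s0", "cum1", "cum2", "cum3", "cum4"], kappas):
    print(label, np.round(values, 5))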