By Hendrik Speleers, AY 2024-2025
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
from scipy import special
from scipy import interpolate
from scipy import integrate
from scipy import linalg
import sympy as sym
Piecewise linear interpolation has approximation order $O(h^2)$, where $h$ is the maximal distance between consecutive interpolation sites. This means that the error between a sufficiently smooth function and its interpolant (measured in any $L_q$-norm, $1 \leq q \leq \infty$) behaves asymptotically like $O(h^2)$. Check this behavior by approximating the function $\sin(x)$ on the interval $[0, 10]$ and measuring the error in the inf-norm ($q = \infty$).
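More precisely, the standard bound for the piecewise linear interpolant $I_h f$ of a twice continuously differentiable function $f$ is $$\|f - I_h f\|_\infty \leq \frac{h^2}{8} \, \|f''\|_\infty,$$ so halving $h$ should reduce the error by roughly a factor of $4$. For $f(x) = \sin(x)$ we have $\|f''\|_\infty = 1$, so the bound is simply $h^2/8$.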
a_data = 0
b_data = 10
def fun_data(x):
    return np.sin(x)
Compute and visualize a sequence of piecewise linear interpolants.
N = 1000
t = np.linspace(a_data, b_data, N)
f_exact = fun_data(t)
L = 10
f_approx = np.zeros((N, L))
for l in range(L):
    # 2**l subintervals of [a_data, b_data]; interp1d defaults to piecewise linear interpolation
    x = np.linspace(a_data, b_data, 2**l+1)
    y = fun_data(x)
    fun_approx = interpolate.interp1d(x, y)
    f_approx[:, l] = fun_approx(t)
plt.plot(t, f_exact, 'k*')
legend_text = ['Exact']
plt.plot(t, f_approx, '-')
legend_text += ['Interpolant %d' % l for l in range(L)]
plt.legend(legend_text, loc=(1.1, 0))
Compute and visualize the errors in the inf-norm.
f_error = np.max(np.abs(f_approx - f_exact[:, np.newaxis]), 0)
f_error
array([1.42875322e+00, 1.72553904e+00, 6.52759121e-01, 1.82287844e-01, 4.84298188e-02, 1.21021337e-02, 3.04556176e-03, 7.62480367e-04, 1.90615990e-04, 4.76518250e-05])
f_error_order = np.log2(f_error[:-1] / f_error[1:])
f_error_order
array([-0.27229037, 1.4024245 , 1.84033235, 1.91225085, 2.00063418, 1.99048116, 1.99793626, 2.00003105, 2.00006577])
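As a rough cross-check (a small sketch; the names levels and slope are introduced here for illustration), the overall convergence rate can also be estimated by a least-squares fit of $\log_2$ of the error against the refinement level. The slope should be close to $-2$ once the asymptotic regime is reached, so the coarsest level is skipped.
levels = np.arange(L)
# fit log2(error) versus refinement level; the slope estimates the convergence order
slope = np.polyfit(levels[1:], np.log2(f_error[1:]), 1)[0]
slope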
plt.semilogy(f_error, '-o')
plt.semilogy([4, 5], [1*1e-3, 1/4*1e-3], 'k--o')
plt.legend(('Error', 'Reference'))
A quadrature rule provides an approximation of the definite integral of a function, formulated as a weighted sum of function values at specified points within the domain of integration.
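In formula form, a quadrature rule with nodes $x_i$ and weights $w_i$ reads $$\int_a^b f(x)\,dx \approx \sum_{i=1}^{n} w_i\, f(x_i).$$ Gauss-Legendre nodes and weights are tabulated on the reference interval $[-1, 1]$; for a general interval $[a, b]$ the affine change of variables $x = \tfrac{b-a}{2}(t+1) + a$ gives $$\int_a^b f(x)\,dx = \frac{b-a}{2} \int_{-1}^{1} f\!\left(\tfrac{b-a}{2}(t+1) + a\right) dt,$$ which explains the node mapping and the factor $(b-a)/2$ in the helper functions below.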
def integrate_gauss_numpy(n, f, a=-1.0, b=1.0):
    # nodes and weights on the reference interval [-1, 1]
    x, w = np.polynomial.legendre.leggauss(n)
    # map the nodes to [a, b] and scale by the Jacobian (b - a)/2
    t = (x + 1.0) * (b - a)/2.0 + a
    gauss = np.dot(w, f(t)) * (b - a)/2.0
    return gauss
def integrate_gauss_scipy(n, f, a=-1.0, b=1.0):
    x, w = special.roots_legendre(n)
    t = (x + 1.0) * (b - a)/2.0 + a
    gauss = np.dot(w, f(t)) * (b - a)/2.0
    return gauss
def integrate_quad_scipy(f, a=-1.0, b=1.0):
    quad, quad_err = integrate.quad(f, a, b)
    return quad
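As a quick sanity check of the three helpers (the integrand here is chosen only for illustration), compare them on $\int_0^1 e^x\,dx = e - 1$:
# all three values should agree with e - 1 to many digits
print(integrate_gauss_numpy(5, np.exp, 0.0, 1.0))
print(integrate_gauss_scipy(5, np.exp, 0.0, 1.0))
print(integrate_quad_scipy(np.exp, 0.0, 1.0))
print(np.exp(1.0) - 1.0)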
Numerically compute the integral of the polynomial $x^{11} + 3x^4$ over the interval $[0, \pi]$.
a_int = 0.0
b_int = np.pi
def fun_int(x):
    return x**11 + 3*x**4
exact = np.pi**12/12 + np.pi**5*3/5
gauss_numpy = integrate_gauss_numpy(6, fun_int, a_int, b_int)
gauss_scipy = integrate_gauss_scipy(6, fun_int, a_int, b_int)
quad_scipy = integrate_quad_scipy(fun_int, a_int, b_int)
gauss_numpy_error = np.abs(gauss_numpy - exact)
gauss_scipy_error = np.abs(gauss_scipy - exact)
quad_scipy_error = np.abs(quad_scipy - exact)
print('Exact solution: %e' % exact)
print('NumPy Gauss-Legendre solution: %e with error: %e' % (gauss_numpy, gauss_numpy_error))
print('SciPy Gauss-Legendre solution: %e with error: %e' % (gauss_scipy, gauss_scipy_error))
print('SciPy QUADPACK solution: %e with error: %e' % (quad_scipy, quad_scipy_error))
Exact solution: 7.720604e+04
NumPy Gauss-Legendre solution: 7.720604e+04 with error: 1.746230e-10
SciPy Gauss-Legendre solution: 7.720604e+04 with error: 5.820766e-11
SciPy QUADPACK solution: 7.720604e+04 with error: 7.275958e-11
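The Gauss-Legendre errors above are essentially roundoff: an $n$-point Gauss-Legendre rule is exact for polynomials of degree up to $2n - 1$, so with $n = 6$ the degree-11 integrand is captured exactly. A minimal check of this property, reusing the helper above (the degree-12 comparison is added here only for illustration):
# degree 11 is integrated exactly (error at roundoff level), degree 12 is not
print(np.abs(integrate_gauss_numpy(6, lambda x: x**11, a_int, b_int) - np.pi**12/12))
print(np.abs(integrate_gauss_numpy(6, lambda x: x**12, a_int, b_int) - np.pi**13/13))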
Time these three quadrature implementations.
%timeit integrate_gauss_numpy(6, fun_int, a_int, b_int)
%timeit integrate_gauss_scipy(6, fun_int, a_int, b_int)
%timeit integrate_quad_scipy(fun_int, a_int, b_int)
93.2 µs ± 435 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
25.2 µs ± 308 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
3.04 µs ± 42.8 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
Consider the $n \times n$ matrix $T_n$ and the $n \times 1$ vector $b_n$ with the following structure: $$ T_n = \begin{bmatrix} 1 & -3 & -5 & -7 & \cdots \\ 2 & 1 & -3 & -5 & \cdots \\ 3 & 2 & 1 & -3 & \cdots \\ 4 & 3 & 2 & 1 & \cdots \\ \vdots & \vdots & \vdots & \vdots & \ddots \end{bmatrix}, \quad b_n = \begin{bmatrix} n \\ n-1 \\ n-2 \\ n-3 \\ \vdots \end{bmatrix} $$ Note that $T_n$ is a Toeplitz matrix (constant along each diagonal), so it can be built directly from its first column and first row with scipy.linalg.toeplitz. Then compute the solution $x_n$ of the linear system $$T_n x_n = b_n.$$
n = 10
col = np.arange(1, n+1)
row = np.arange(-1, -2*n, -2)
T_scipy = linalg.toeplitz(col, row)
b_scipy = np.arange(n, 0, -1)
T_scipy
array([[  1,  -3,  -5,  -7,  -9, -11, -13, -15, -17, -19],
       [  2,   1,  -3,  -5,  -7,  -9, -11, -13, -15, -17],
       [  3,   2,   1,  -3,  -5,  -7,  -9, -11, -13, -15],
       [  4,   3,   2,   1,  -3,  -5,  -7,  -9, -11, -13],
       [  5,   4,   3,   2,   1,  -3,  -5,  -7,  -9, -11],
       [  6,   5,   4,   3,   2,   1,  -3,  -5,  -7,  -9],
       [  7,   6,   5,   4,   3,   2,   1,  -3,  -5,  -7],
       [  8,   7,   6,   5,   4,   3,   2,   1,  -3,  -5],
       [  9,   8,   7,   6,   5,   4,   3,   2,   1,  -3],
       [ 10,   9,   8,   7,   6,   5,   4,   3,   2,   1]])
x_scipy = linalg.solve(T_scipy, b_scipy)
x_scipy
array([ 0.27338925, -0.00839007, -0.0125851 , -0.01887765, -0.02831647, -0.04247471, -0.06371207, -0.0955681 , -0.14335215, -0.21502823])
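As a sanity check (using the arrays just computed), verify that the solution satisfies the system up to roundoff:
# maximal residual component and an allclose check
print(np.max(np.abs(T_scipy @ x_scipy - b_scipy)))
print(np.allclose(T_scipy @ x_scipy, b_scipy))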
T_scipy_slow = np.zeros((n, n), dtype=int)
for i in range(n):
    for j in range(n):
        # same Toeplitz structure, built element by element with Python loops
        T_scipy_slow[i, j] = 2*(i-j)-1 if i < j else i-j+1
T_scipy_slow
array([[  1,  -3,  -5,  -7,  -9, -11, -13, -15, -17, -19],
       [  2,   1,  -3,  -5,  -7,  -9, -11, -13, -15, -17],
       [  3,   2,   1,  -3,  -5,  -7,  -9, -11, -13, -15],
       [  4,   3,   2,   1,  -3,  -5,  -7,  -9, -11, -13],
       [  5,   4,   3,   2,   1,  -3,  -5,  -7,  -9, -11],
       [  6,   5,   4,   3,   2,   1,  -3,  -5,  -7,  -9],
       [  7,   6,   5,   4,   3,   2,   1,  -3,  -5,  -7],
       [  8,   7,   6,   5,   4,   3,   2,   1,  -3,  -5],
       [  9,   8,   7,   6,   5,   4,   3,   2,   1,  -3],
       [ 10,   9,   8,   7,   6,   5,   4,   3,   2,   1]])
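The loop-based construction should of course reproduce the Toeplitz one; a quick check, together with a vectorized alternative (the names I, J and T_vec are introduced here only for illustration):
print(np.array_equal(T_scipy, T_scipy_slow))
# vectorized construction with index arrays instead of Python loops
I, J = np.indices((n, n))
T_vec = np.where(I < J, 2*(I - J) - 1, I - J + 1)
print(np.array_equal(T_vec, T_scipy))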
def T_fun(i, j):
    if i < j:
        return 2*(i-j)-1
    else:
        return i-j+1
def b_fun(i, j):
    # j is unused, but sym.Matrix expects a function of two indices
    return n-i
T_sympy = sym.Matrix(n, n, T_fun)
b_sympy = sym.Matrix(n, 1, b_fun)
T_sympy
x_sympy = T_sympy.LUsolve(b_sympy)
x_sympy
x_sympy.evalf()
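Finally, the exact SymPy solution and the floating-point SciPy solution can be compared directly; the conversion below (introducing x_sympy_float) is one possible way to turn a SymPy matrix into a NumPy array, and the difference should be at roundoff level.
x_sympy_float = np.array(x_sympy.evalf(), dtype=float).flatten()
np.max(np.abs(x_sympy_float - x_scipy))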