Analytic solutions of the Fokker-Planck equation after adding multiplicative stochastic white noise

License: GPL3
Image: ubuntu2204
Kernel: SageMath 10.1

This calculation corresponds to the stationary condition.
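A plausible reading (an assumption, since the underlying SDE is not restated in this notebook) is that $x$ follows an Itô equation with multiplicative noise of amplitude $D x$,

$$dx = -\gamma\,(x - x_0)\,dt + D\,x\,dW_t .$$

Setting $\partial_t p = 0$ in the corresponding Fokker-Planck equation and calling the constant probability current $J$ gives

$$J = -\gamma\,(x - x_0)\,p(x) - \frac{1}{2}\frac{d}{dx}\!\left[D^{2} x^{2}\,p(x)\right],$$

which, with the substitution $q(x) = x^{2} p(x)$, reduces to the first-order ODE set up in the next cells, with $c = -2J/D^{2}$.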

%display latex
# Define some constants that we will use later
c = var('c')
C = var('C')
gamma = var('gamma')
D = var('D')
x0 = var('x0')
alpha = var('alpha')
assume(D > 0)
assume(gamma, 'real')
assume(x0, 'real')
assume(x0 > 0)
assume(c, 'real')
assume(C, 'real')
assume(alpha < 0)
p = function('p')(x)
q = function('q')(x)
equ_diff_1 = derivative(q, x, 1) == - 2/(D*D)*gamma*(x-x0)/(x*x)*q + c
equ_diff_1

$\displaystyle \frac{\partial}{\partial x}q\left(x\right) = c - \frac{2 \, \gamma {\left(x - x_{0}\right)} q\left(x\right)}{D^{2} x^{2}}$
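For zero current, $c = 0$, the equation separates (a standard step, spelled out here for readability):

$$\frac{q'(x)}{q(x)} = -\frac{2\gamma}{D^{2}}\left(\frac{1}{x} - \frac{x_{0}}{x^{2}}\right) \;\Longrightarrow\; \ln q(x) = -\frac{2\gamma}{D^{2}}\left(\ln x + \frac{x_{0}}{x}\right) + \text{const},$$

so with $a = -2\gamma/D^{2}$ the solution is $q(x) = C\,x^{a}\,e^{a x_{0}/x}$, which is exactly the closed form entered below.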

CONSTANT_VALUES = { D: 1.0e-5, gamma: 3.5 * 1.0e-5, x0 : 7.6, c: 0., C: 1.0 }
# q analytical solution
a = -2/(D*D)*gamma
q = C * x^a * exp(a * x0 / x)
q

$\displaystyle \frac{C e^{\left(-\frac{2 \, \gamma x_{0}}{D^{2} x}\right)}}{x^{\frac{2 \, \gamma}{D^{2}}}}$
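As a quick sanity check (an added sketch, assuming the zero-current case $c = 0$), the closed form can be substituted back into the homogeneous ODE; the residual should simplify to zero.

# Verify that q = C*x^a*exp(a*x0/x), with a = -2*gamma/D^2, solves the c = 0 ODE
residual = derivative(q, x, 1) + 2/(D*D)*gamma*(x - x0)/(x*x)*q
residual.simplify_full()  # expected to return 0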

q_num_P1 = q.substitute(CONSTANT_VALUES)
q_num_P1.substitute(x=7.6)

$\displaystyle 2.22880036366235 \times 10^{-920576}$
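This value is far below what double precision can represent: for the parameter values above the exponent is $a = -2\gamma/D^{2} = -7\times 10^{5}$, which is why the interactive cell below rescales $q$ by $q(x_0)$ and evaluates with 512-bit reals. A small added check of the exponent (not part of the original notebook):

# a = -2*gamma/D^2 = -2 * 3.5e-5 / (1e-5)^2 = -7e5 for the values above
(-2*gamma/(D*D)).substitute(CONSTANT_VALUES)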

# plot(1/q_num_P1.substitute(x=7.6)*q_num_P1, xmin=0.5,xmax=20,color='cyan')
from functools import lru_cache
import numpy as np
import matplotlib.pyplot as plt

# Slider values for the exponent a, from -1 down to -5*10^6 on a rough log grid
a_VALUES = sorted(
    ["-1 * 10^{}".format(i) for i in range(0, 7)]
    + ["-5 * 10^{}".format(i) for i in range(0, 7)]
    + ["-2 * 10^{}".format(i) for i in range(0, 7)],
    key=lambda t: sage_eval(t))
RRR = RealField(Integer(512))  # 512-bit reals to avoid underflow in q(x)
COLOR1 = "tab:blue"
COLOR2 = "tab:red"

@interact
def draw_plot(a_value = slider(a_VALUES, default = "-1 * 10^0", label="a")):
    x0_val = CONSTANT_VALUES[x0]
    a_value = sage_eval(a_value)
    # q normalized at x0, and the density r = q/x^2 normalized to unit area
    q_espression = x^a_value * exp(a_value * x0_val / x)
    q_espression_n = q_espression / q_espression.substitute(x=x0_val)
    q_espression_n_compiled = fast_callable(q_espression_n, vars=[x], domain=RRR)
    r = q_espression_n / x^2
    r_espression_n_compiled = fast_callable(r, vars=[x], domain=RRR)
    r_espression_norm_int1, _ = numerical_integral(r_espression_n_compiled, a=0, b=10, algorithm="qag", rule=6)
    r_espression_norm_int2, _ = numerical_integral(r_espression_n_compiled, a=10, b=+oo, algorithm="qag", rule=6)
    r_espression_norm = r_espression_norm_int1 + r_espression_norm_int2
    r_final = lambda t: r_espression_n_compiled(t) / r_espression_norm
    # Sample both curves with Sage's plot(), then draw them with matplotlib
    p1_plot = plot(q_espression_n_compiled, xmin=0.01, xmax=20, color="blue")
    p2_plot = plot(r_final, xmin=0.01, xmax=20, color="red")
    p1_x_points = np.array(p1_plot._objects[0].xdata, dtype=np.float64)
    p1_y_points = np.array(p1_plot._objects[0].ydata, dtype=np.float64)
    p2_x_points = np.array(p2_plot._objects[0].xdata, dtype=np.float64)
    p2_y_points = np.array(p2_plot._objects[0].ydata, dtype=np.float64)
    plt.xlabel("$x$", fontsize=14)
    plt.title("Probability distribution at equilibrium")
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['bottom'].set_position('zero')
    plt.ylabel("$p$", fontsize=14, color=COLOR1)
    plt.plot(p2_x_points, p2_y_points, color=COLOR1, linewidth=2.5)
    plt.fill_between(p2_x_points, p2_y_points, color=COLOR1, alpha=0.1)
    plt.gca().tick_params(axis='y', colors=COLOR1)
    # Secondary axis for q
    ax2 = plt.gca().twinx()
    plt.gca().spines['top'].set_visible(False)
    plt.gca().spines['bottom'].set_visible(False)
    plt.gca().spines['right'].set_color(COLOR2)
    plt.gca().spines['left'].set_color(COLOR1)
    plt.gca().tick_params(axis='y', colors=COLOR2)
    plt.ylabel("$q$", fontsize=14, color=COLOR2)
    plt.plot(p1_x_points, p1_y_points, color=COLOR2, linewidth=1.5)
    plt.gca().set_xlim(left=0)
    # Dashed vertical marker at x0, starting at the x axis
    v_line_bottom = (x0_val, 0)
    v_line_bottom_display = plt.gca().transData.transform(v_line_bottom)
    v_line_bottom_axis = plt.gca().transAxes.inverted().transform(v_line_bottom_display)
    ymin_line = v_line_bottom_axis[1]
    plt.axvline(x = x0_val, ymin=ymin_line, color='tab:gray', label='$x_0$', linestyle='--', linewidth=1)
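In the figure produced by this cell, the filled curve (left axis, $p$) is the normalized density $p(x) \propto q(x)/x^{2}$, the thinner curve (right axis, $q$) is $q(x)/q(x_{0})$, and the dashed grey vertical line marks $x_{0}$.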
from functools import partial
from scipy.optimize import minimize_scalar

XMAX = 100_000
SCALE = "loglog"

def statistical_indicators():
    x0_val = CONSTANT_VALUES[x0]
    # Same construction as above, but with the exponent alpha kept symbolic
    q_espression = x^alpha * exp(alpha * x0_val / x)
    q_espression_n = q_espression / q_espression.substitute(x=x0_val)
    r = q_espression_n / x^2
    r_espression_n_compiled = fast_callable(r, vars=[alpha, x], domain=RRR)
    r_espression_norm_int1 = lambda alpha_val: numerical_integral(partial(r_espression_n_compiled, alpha_val), a=0, b=10, algorithm="qag", rule=6)[0]
    r_espression_norm_int2 = lambda alpha_val: numerical_integral(partial(r_espression_n_compiled, alpha_val), a=10, b=+oo, algorithm="qag", rule=6)[0]
    r_espression_norm = lambda alpha_val: r_espression_norm_int1(alpha_val) + r_espression_norm_int2(alpha_val)

    def r_n(alpha_val):
        # Normalized density for a given value of alpha
        evaluated_norm = r_espression_norm(alpha_val)
        def current_r_n(x_val):
            return r_espression_n_compiled(alpha_val, x_val) / evaluated_norm
        return current_r_n

    def r_n_mode(alpha_val):
        current_r_n = r_n(-alpha_val)
        return x0_val - minimize_scalar(lambda t: -current_r_n(t), bounds=(0, x0_val + 0.01)).x

    def r_n_mean(alpha_val):
        current_r_n = r_n(-alpha_val)
        return x0_val - numerical_integral(lambda t: t * current_r_n(t), a=0, b=+oo)[0]

    def r_n_median(alpha_val):
        current_r_n = r_n(-alpha_val)
        return x0_val - find_root(lambda t: numerical_integral(current_r_n, a=0, b=t)[0] - 0.5, 0, 10)

    # Median and mode of the stationary density, as distance from x0, versus -alpha
    p1_plot = plot(r_n_median, xmin=1, xmax=XMAX, scale=SCALE, legend_label="median")
    p2_plot = plot(r_n_mode, xmin=1, xmax=XMAX, scale=SCALE, color="red", legend_label="mode")
    final_plot = p1_plot + p2_plot
    final_plot.axes_labels(['$- \\alpha$', 'distance from $x_0$'])
    final_plot.show(frame=True)

statistical_indicators()
[Output figure: median and mode of the stationary distribution, shown as distance from $x_0$ versus $-\alpha$ on a log-log scale.]