The Kunferman Python Script Library Volume 1

Python code © C.R. Kunferman All Rights Reserved

import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import numpy as np # data is in a CSV file named 'data.csv' data = pd.read_csv('data.csv') # Calculate k(t) and add it as a column t = np.linspace(0, 10, len(data)) # Or whatever range is appropriate data['Third Wave'] = -((np.sin(t)**2 - np.cos(t)**2) / (np.sin(t)**2 + np.cos(t)**2)**(3/2)) # Calculate the correlation matrix correlation_matrix = data.corr(method='pearson') # Visualize the correlation matrix as a heatmap plt.figure(figsize=(10, 8)) sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', vmin=-1, vmax=1) plt.title('Pearson Correlation Matrix') plt.show() import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # Time range t = np.linspace(0, 10, 1000) # Adjust range as needed # 1Hz Pressure Wave (Example) pressure_wave = np.sin(2 * np.pi * 1 * t) magnetic_field_x = np.cos(2 * np.pi * 1 * t) magnetic_field_y = np.sin(2 * np.pi * 1 * t) magnetic_field = magnetic_field_x * magnetic_field_y # or a combination of them. # Electric Amplitude (Example - Related to pressure, maybe with a phase shift) electric_amplitude = np.sin(2 * np.pi * 1 * t + np.pi/4) # V Electric (Example - Related to magnetic, maybe with some noise) v_electric = np.sin(2 * np.pi * 1 * t) + np.random.normal(0, 0.1, len(t)) # k(t) function third_wave = -((np.sin(t)**2 - np.cos(t)**2) / (np.sin(t)**2 + np.cos(t)**2)**(3/2)) # Create a Pandas DataFrame data = pd.DataFrame({ 'Pressure Wave': pressure_wave, 'Magnetic Field': magnetic_field, 'Electric Amplitude': electric_amplitude, 'V Electric': v_electric, 'Third Wave': third_wave }) # Time lags to test lags = np.arange(-50, 51) # Adjust range as needed # Initialize lists to store correlations correlations = [] # Loop through lags for lag in lags: # Shift k(t) data by the current lag if lag > 0: shifted_k = np.concatenate((np.zeros(lag), data['Third Wave'][:-lag])) elif lag < 0: shifted_k = np.concatenate((data['Third Wave'][-lag:], np.zeros(-lag))) else: shifted_k = data['Third Wave'] # Calculate correlation with Pressure Wave correlation1 = pd.Series(shifted_k).corr(data['V Electric']) correlation2 = pd.Series(shifted_k).corr(data['Pressure Wave']) correlation3 = pd.Series(shifted_k).corr(data['Electric Amplitude']) correlation4 = pd.Series(shifted_k).corr(data['Magnetic Field']) correlations.append(correlation1) correlations.append(correlation2) correlations.append(correlation3) correlations.append(correlation4) # Plot the time-lagged correlations plt.figure(figsize=(10, 6)) plt.plot(lags, correlations) plt.xlabel('Time Lag (units of data points)') plt.ylabel('Correlation with V Electric') plt.title('Time-Lagged Correlation: Third Wave vs V Electric') plt.grid(True) plt.show() # repeat the same process to get the correlation with the other variables

import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # Time range t = np.linspace(0, 10, 1000) # Adjust range as needed # 1Hz Pressure Wave (Example) pressure_wave = np.sin(2 * np.pi * 1 * t) magnetic_field_x = np.cos(2 * np.pi * 1 * t) magnetic_field_y = np.sin(2 * np.pi * 1 * t) magnetic_field = magnetic_field_x * magnetic_field_y # or a combination of them. # Electric Amplitude (Example - Related to pressure, maybe with a phase shift) electric_amplitude = np.sin(2 * np.pi * 1 * t + np.pi/4) # V Electric (Example - Related to magnetic, maybe with some noise) v_electric = np.sin(2 * np.pi * 1 * t) + np.random.normal(0, 0.1, len(t)) # k(t) function third_wave = -((np.sin(t)**2 - np.cos(t)**2) / (np.sin(t)**2 + np.cos(t)**2)**(3/2)) # Create a Pandas DataFrame data = pd.DataFrame({ 'Pressure Wave': pressure_wave, 'Magnetic Field': magnetic_field, 'Electric Amplitude': electric_amplitude, 'V Electric': v_electric, 'Third Wave': third_wave }) # Time lags to test lags = np.arange(-50, 51) # Adjust range as needed # Initialize lists to store correlations correlations_v_electric = [] correlations_pressure = [] correlations_electric_amp = [] correlations_magnetic = [] # Loop through lags for lag in lags: # Shift k(t) data by the current lag if lag > 0: shifted_k = np.concatenate((np.zeros(lag), data['Third Wave'][:-lag])) elif lag < 0: shifted_k = np.concatenate((data['Third Wave'][-lag:], np.zeros(-lag))) else: shifted_k = data['Third Wave'] # Calculate correlations correlation_v_electric = pd.Series(shifted_k).corr(data['V Electric']) correlation_pressure = pd.Series(shifted_k).corr(data['Pressure Wave']) correlation_electric_amp = pd.Series(shifted_k).corr(data['Electric Amplitude']) correlation_magnetic = pd.Series(shifted_k).corr(data['Magnetic Field']) correlations_v_electric.append(correlation_v_electric) correlations_pressure.append(correlation_pressure) correlations_electric_amp.append(correlation_electric_amp) correlations_magnetic.append(correlation_magnetic) # Plot the time-lagged correlations plt.figure(figsize=(12, 8)) plt.plot(lags, correlations_v_electric, label='Third Wave vs V Electric') plt.plot(lags, correlations_pressure, label='Third Wave vs Pressure Wave') plt.plot(lags, correlations_electric_amp, label='Third Wave vs Electric Amplitude') plt.plot(lags, correlations_magnetic, label='Third Wave vs Magnetic Field') plt.xlabel('Time Lag (units of data points)') plt.ylabel('Correlation') plt.title('Time-Lagged Correlations') plt.legend() plt.grid(True) plt.show() import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from scipy.fft import fft, fftfreq # Constants t_max = 10.0 # Total time (seconds) dt = 0.001 # Time step (seconds) R = 1.0 # Radius of double helix (unitless for visualization) k_growth = 1.5 # Exponential growth rate (increased to emphasize saturation) # Time array t = np.arange(0, t_max, dt) # Function to compute PGW with Exponential Growth at Integer Frequencies def pgw_with_growth(f): # Round frequency to nearest integer for growth condition f_rounded = np.round(f).astype(int) # Amplitude grows exponentially if frequency is a whole integer amplitude = np.exp(k_growth * (f_rounded == f).astype(int)) if f_rounded == f else 1.0 return amplitude * np.sin(2 * np.pi * f * t), amplitude # Test frequencies: base 9.801 Hz, integer 10 Hz, and 20 Hz freqs = [9.801, 10.0, 20.0] pgw_signals = {} amplitudes = {} for f in freqs: signal, amp = pgw_with_growth(f) pgw_signals[f] = signal amplitudes[f] = amp # Double Helix for Each Frequency with Growing Amplitude helix_data = {} for f in freqs: amplitude = amplitudes[f] helix_x1 = R * amplitude * np.cos(2 * np.pi * f * t) helix_y1 = R * amplitude * np.sin(2 * np.pi * f * t) helix_z1 = t helix_x2 = R * amplitude * np.cos(2 * np.pi * f * t + np.pi) helix_y2 = R * amplitude * np.sin(2 * np.pi * f * t + np.pi) helix_z2 = t helix_data[f] = (helix_x1, helix_y1, helix_z1, helix_x2, helix_y2, helix_z2) # Plotting plt.figure(figsize=(15, 10)) # Plot 1: PGW Signals at Different Frequencies plt.subplot(2, 2, 1) for f, signal in pgw_signals.items(): plt.plot(t, signal, label=f'{f} Hz') plt.grid(True) plt.xlabel('Time (s)') plt.ylabel('Amplitude') plt.title('PGW with Exponential Growth at Integer Frequencies') plt.legend() # Plot 2: 3D Double Helix for Each Frequency (Pure Green at 20 Hz) fig = plt.figure(figsize=(15, 5)) for i, f in enumerate(freqs): ax = fig.add_subplot(1, 3, i+1, projection='3d') hx1, hy1, hz1, hx2, hy2, hz2 = helix_data[f] # Use pure green for 20 Hz to reflect saturation color1 = 'g' if f == 20.0 else 'r' color2 = 'g' if f == 20.0 else 'b' ax.plot(hx1, hy1, hz1, color1, label='Helix 1') ax.plot(hx2, hy2, hz2, color2, label='Helix 2') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') ax.set_title(f'PGW Double Helix at {f} Hz') ax.legend() # Adjust limits to show saturation at 20 Hz if f == 20.0: ax.set_ylim([-10, 10]) ax.set_xlim([-10, 10]) else: ax.set_ylim([-2, 2]) ax.set_xlim([-2, 2]) # Fourier Analysis for Quantum Gravity N = len(t) plt.subplot(2, 2, 3) for f, signal in pgw_signals.items(): pgw_f = fft(signal) freq = fftfreq(N, dt) plt.plot(freq[:N//2], 2.0/N * np.abs(pgw_f[:N//2]), label=f'{f} Hz Spectrum') plt.grid(True) plt.xlabel('Frequency (Hz)') plt.ylabel('Amplitude') plt.title('Frequency Spectrum (Quantum States)') plt.legend() plt.tight_layout() plt.show() # Plot 3: Pure Green Swarm Chart Representation at 20 Hz plt.figure(figsize=(5, 5)) plt.fill_between([-10, 10], -10, 10, color='green', alpha=1.0) plt.title('Swarm Chart at 20 Hz: Pure Green (Saturated)') plt.xlabel('X') plt.ylabel('Y') plt.xlim(-10, 10) plt.ylim(-10, 10) plt.grid(True) plt.show()

import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from scipy.fft import fft, fftfreq # Constants t_max = 10.0 # Total time (seconds) dt = 0.001 # Time step (seconds) R = 1.0 # Radius of double helix (unitless for visualization) k_growth = 1.0 # Exponential growth rate # Time array t = np.arange(0, t_max, dt) # Function to compute PGW with Exponential Growth at Integer Frequencies def pgw_with_growth(f): # Round frequency to nearest integer for growth condition f_rounded = np.round(f).astype(int) # Amplitude grows exponentially if frequency is a whole integer amplitude = np.exp(k_growth * (f_rounded == f).astype(int)) if f_rounded == f else 1.0 return amplitude * np.sin(2 * np.pi * f * t) # Test frequencies: base 9.801 Hz, integer 10 Hz, and 20 Hz freqs = [9.801, 10.0, 20.0] pgw_signals = {f: pgw_with_growth(f) for f in freqs} # Double Helix for Each Frequency with Growing Amplitude helix_data = {} for f in freqs: amplitude = np.exp(k_growth * (np.round(f) == f).astype(int)) if np.round(f) == f else 1.0 helix_x1 = R * amplitude * np.cos(2 * np.pi * f * t) helix_y1 = R * amplitude * np.sin(2 * np.pi * f * t) helix_z1 = t helix_x2 = R * amplitude * np.cos(2 * np.pi * f * t + np.pi) hy2 = R * amplitude * np.sin(2 * np.pi * f * t + np.pi) hz2 = t helix_data[f] = (helix_x1, helix_y1, helix_z1, helix_x2, hy2, hz2) # Plotting plt.figure(figsize=(15, 10)) # Plot 1: PGW Signals at Different Frequencies plt.subplot(2, 2, 1) for f, signal in pgw_signals.items(): plt.plot(t, signal, label=f'{f} Hz') plt.grid(True) plt.xlabel('Time (s)') plt.ylabel('Amplitude') plt.title('PGW with Exponential Growth at Integer Frequencies') plt.legend() # Plot 2: 3D Double Helix for Each Frequency fig = plt.figure(figsize=(15, 5)) for i, f in enumerate(freqs): ax = fig.add_subplot(1, 3, i+1, projection='3d') hx1, hy1, hz1, hx2, hy2, hz2 = helix_data[f] ax.plot(hx1, hy1, hz1, 'r-', label='Helix 1') ax.plot(hx2, hy2, hz2, 'g-', label='Helix 2') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') ax.set_title(f'PGW Double Helix at {f} Hz') ax.legend() # Adjust y-axis limit to highlight 20 Hz filling the chart ax.set_ylim([-10, 10]) # Increase limit to show amplification at 20 Hz # Fourier Analysis for Quantum Gravity N = len(t) plt.subplot(2, 2, 3) for f, signal in pgw_signals.items(): pgw_f = fft(signal) freq = fftfreq(N, dt) plt.plot(freq[:N//2], 2.0/N * np.abs(pgw_f[:N//2]), label=f'{f} Hz Spectrum') plt.grid(True) plt.xlabel('Frequency (Hz)') plt.ylabel('Amplitude') plt.title('Frequency Spectrum (Quantum States)') plt.legend() plt.tight_layout() plt.show() import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from scipy.fft import fft, fftfreq # Constants t_max = 10.0 # Total time (seconds) dt = 0.001 # Time step (seconds) R = 1.0 # Radius of double helix (unitless for visualization) k_growth = 1.0 # Exponential growth rate # Time array t = np.arange(0, t_max, dt) # Function to compute PGW with Exponential Growth at Integer Frequencies def pgw_with_growth(f): # Round frequency to nearest integer for growth condition f_rounded = np.round(f).astype(int) # Amplitude grows exponentially if frequency is a whole integer amplitude = np.exp(k_growth * (f_rounded == f).astype(int)) return amplitude * np.sin(2 * np.pi * f * t) # Test frequencies: base 9.801 Hz, integer 10 Hz, and multiple 20 Hz freqs = [9.801, 10.0, 20.0] pgw_signals = {f: pgw_with_growth(f) for f in freqs} # Double Helix for Each Frequency with Growing Amplitude helix_data = {} for f in freqs: amplitude = np.exp(k_growth * (np.round(f) == f).astype(int)) helix_x1 = R * amplitude * np.cos(2 * np.pi * f * t) helix_y1 = R * amplitude * np.sin(2 * np.pi * f * t) helix_z1 = t helix_x2 = R * amplitude * np.cos(2 * np.pi * f * t + np.pi) helix_y2 = R * amplitude * np.sin(2 * np.pi * f * t + np.pi) helix_z2 = t helix_data[f] = (helix_x1, helix_y1, helix_z1, helix_x2, helix_y2, helix_z2) # Plotting plt.figure(figsize=(15, 10)) # Plot 1: PGW Signals at Different Frequencies plt.subplot(2, 2, 1) for f, signal in pgw_signals.items(): plt.plot(t, signal, label=f'{f} Hz') plt.grid(True) plt.xlabel('Time (s)') plt.ylabel('Amplitude') plt.title('PGW with Exponential Growth at Integer Frequencies') plt.legend() # Plot 2: 3D Double Helix for Each Frequency fig = plt.figure(figsize=(15, 5)) for i, f in enumerate(freqs): ax = fig.add_subplot(1, 3, i+1, projection='3d') hx1, hy1, hz1, hx2, hy2, hz2 = helix_data[f] ax.plot(hx1, hy1, hz1, 'r-', label='Helix 1') ax.plot(hx2, hy2, hz2, 'g-', label='Helix 2') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') ax.set_title(f'PGW Double Helix at {f} Hz') ax.legend() # Fourier Analysis for Quantum Gravity N = len(t) plt.subplot(2, 2, 3) for f, signal in pgw_signals.items(): pgw_f = fft(signal) freq = fftfreq(N, dt) plt.plot(freq[:N//2], 2.0/N * np.abs(pgw_f[:N//2]), label=f'{f} Hz Spectrum') plt.grid(True) plt.xlabel('Frequency (Hz)') plt.ylabel('Amplitude') plt.title('Frequency Spectrum (Quantum States)') plt.legend() plt.tight_layout() plt.show()

import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # Constants c = 3e8 # Speed of light (m/s) f_constant = 1.0 # W3 frequency (Hz, placeholder, frequency-independent) f_pgw = 9.801 # PGW frequency for Earth (Hz) t_max = 10.0 # Total time (seconds) dt = 0.001 # Time step (seconds) R = 1.0 # Radius of spiral (unitless for visualization) E0, B0 = 1.0, 1.0 # Amplitudes of electric and magnetic fields # Time array t = np.arange(0, t_max, dt) # Wave Curvature (W3): K(t) K_t = -((np.sin(t)**2-np.cos(t)**2) /((np.sin(t)**2) +(np.cos(t)**2)) **(3/2)) # Oscillates between -1 and 1 # W3 Spiral Path at speed of light x_w3 = R * np.cos(2 * np.pi * f_constant * t) y_w3 = R * np.sin(2 * np.pi * f_constant * t) z_w3 = c * t / 1e8 # Scaled z-axis (spiral forward at c, scaled for visualization) # W3 Electric and Magnetic Fields E_t = E0 * np.sin(2 * np.pi * f_constant * t) B_t = B0 * np.cos(2 * np.pi * f_constant * t) # PGW (9.801 Hz for Earth) pgw = np.sin(2 * np.pi * f_pgw * t) # Oscillates between -1 and 1 # Double Helix for PGW (inspired by Earth's swarm chart) helix_x1 = R * np.cos(2 * np.pi * f_pgw * t) helix_y1 = R * np.sin(2 * np.pi * f_pgw * t) helix_z1 = t helix_x2 = R * np.cos(2 * np.pi * f_pgw * t + np.pi) # Phase offset for second helix helix_y2 = R * np.sin(2 * np.pi * f_pgw * t + np.pi) helix_z2 = t # Plotting plt.figure(figsize=(15, 10)) # Plot 1: K(t) and PGW over Time plt.subplot(2, 2, 1) plt.plot(t, K_t, 'b-', label='W3 Curvature K(t)') plt.plot(t, pgw, 'r-', label='PGW (9.801 Hz)') plt.grid(True) plt.xlabel('Time (s)') plt.ylabel('Amplitude') plt.title('W3 and PGW Oscillations') plt.legend() # Plot 2: W3 Electric and Magnetic Fields plt.subplot(2, 2, 2) plt.plot(t, E_t, 'g-', label='E(t)') plt.plot(t, B_t, 'm-', label='B(t)') plt.grid(True) plt.xlabel('Time (s)') plt.ylabel('Field Amplitude') plt.title('W3 EM Fields') plt.legend() # Plot 3: W3 Spiral Path (3D) fig = plt.figure(figsize=(15, 5)) ax = fig.add_subplot(121, projection='3d') ax.plot(x_w3, y_w3, z_w3, 'b-', label='W3 Spiral Path') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z (Scaled)') ax.set_title('W3 Spiral at Speed of Light') ax.legend() # Plot 4: PGW Double Helix (3D) ax = fig.add_subplot(122, projection='3d') ax.plot(helix_x1, helix_y1, helix_z1, 'r-', label='Helix 1') ax.plot(helix_x2, helix_y2, helix_z2, 'g-', label='Helix 2') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') ax.set_title('PGW Double Helix (9.801 Hz)') ax.legend() plt.tight_layout() plt.show() # Fourier Analysis for Quantum Gravity (Identify Quantum States) from scipy.fft import fft, fftfreq # Fourier Transform of K(t) and PGW N = len(t) K_f = fft(K_t) pgw_f = fft(pgw) freq = fftfreq(N, dt) # Plot Frequency Spectrum plt.figure(figsize=(10, 5)) plt.plot(freq[:N//2], 2.0/N * np.abs(K_f[:N//2]), 'b-', label='W3 K(t) Spectrum') plt.plot(freq[:N//2], 2.0/N * np.abs(pgw_f[:N//2]), 'r-', label='PGW Spectrum') plt.grid(True) plt.xlabel('Frequency (Hz)') plt.ylabel('Amplitude') plt.title('Frequency Spectrum (Quantum States)') plt.legend() plt.show()

import numpy as np import matplotlib.pyplot as plt from matplotlib.animation import FuncAnimation # Parameters g = 9.801 # Frequency (Hz) duration = 5.0 # Simulation duration (seconds) fps = 30 # Frames per second num_points = 100 # Number of points on the disk radius = 1.0 # Radius of the disk # Time array time = np.linspace(0, duration, int(duration * fps)) # Disk points theta = np.linspace(0, 2 * np.pi, num_points) x = radius * np.cos(theta) y = radius * np.sin(theta) # Initialize plot fig, ax = plt.subplots() disk, = ax.plot(x, y, 'b-') # Initial disk wave, = ax.plot([], [], 'r-') # Wave representation ax.set_xlim(-1.5, 1.5) ax.set_ylim(-1.5, 1.5) ax.set_aspect('equal') ax.set_title('Wave Impact on State Metric Disk') # Animation function def animate(i): t = time[i] # Calculate wave height h = np.sin(2 * np.pi * g * t) #Displacement of Points x_displaced = x + (h * np.cos(theta))*0.1 # Multiplying by 0.1 to make the displacement visible y_displaced = y + (h * np.sin(theta))*0.1 # Multiplying by 0.1 to make the displacement visible # Update disk disk.set_xdata(x_displaced) disk.set_ydata(y_displaced) # Update wave representation (a simple sine wave for visualization) wave_x = np.linspace(-1.5, 1.5, 100) wave_y = 0.5 * np.sin(2 * np.pi * g * t + wave_x) # Scaling and shifting for visibility wave.set_xdata(wave_x) wave.set_ydata(wave_y) return disk, wave # Create animation ani = FuncAnimation(fig, animate, frames=len(time), interval=1000 / fps, blit=True) plt.show()

import numpy as np import matplotlib.pyplot as plt # Define the PGW function def h(t, x, g_saturn): return np.sin(2 * np.pi * g_saturn * t) # Parameters g_saturn = 10.43584038 # Surface gravity of the Sun (m/s²) t_values = np.linspace(6, 20, 10) # Fine time increments for higher resolution x_values = np.linspace(0, 20, 10) # Spatial dimension T, X = np.meshgrid(t_values, x_values) Z = h(T, X, g_saturn) # Plotting plt.figure(figsize=(10, 6)) contour = plt.contourf(T, X, Z, levels=100, cmap="plasma") # Higher resolution contour plt.colorbar(contour, label="h(t, x) amplitude") plt.xlabel("Normalized Spatial Dimension") plt.ylabel("Time (fine increments)") plt.title("High-Resolution Saturn PGW Contour Plot with Rings") # Adding vertical lines for planetary locations planet_positions = { "D Ring": 6.6, "C Rng": 7.4, "B Ring": 9.2, "Cassini Division Start": 11.7, "A Ring": 12.2, "Roche Division": 13.6, "F Ring": 14.0, "Janus": 14.9, "G Ring": 16.6, "Methone Ring Arc": 19.4, "Anthe Ring Arc": 19.7, "Pallene Ring": 21.1, "E Ring": 18.1 } for planet, position in planet_positions.items(): if position < 20: # Keep it within zoomed range plt.axvline(x=position, linestyle="dashed",color="white", linewidth=1) plt.text(position, 0.5, planet, rotation=90,color="white", fontsize=10, verticalalignment="center") plt.show()

# Re-run the setup after code environment reset import numpy as np import matplotlib.pyplot as plt # User-provided gravity values g_saturn_corrected = 236 g_neptune_corrected = 236 g_pluto_corrected = 236 def generate_pgpw_pattern_corrected(gravity, title, resolution=500): """Generates a 2D gravitational pressure wave pattern using corrected gravity""" r = np.linspace(0, .004, resolution) theta = np.linspace(0, 2 * np.pi, resolution) r, theta = np.meshgrid(r, theta) # Simulated wave number scaling wave_number = (gravity / 9.801) * 10 z = np.sin(wave_number * np.pi * r) * np.cos(wave_number * theta / 2) # Plotting fig, ax = plt.subplots(subplot_kw={'projection': 'polar'}, figsize=(6, 6)) ctf = ax.contourf(theta, r, z, levels=100, cmap='plasma') ax.set_title(title) plt.colorbar(ctf, ax=ax, orientation='horizontal') plt.tight_layout() return fig # Generate corrected patterns fig_saturn_corr = generate_pgpw_pattern_corrected(g_saturn_corrected, "id like to see this)") fig_neptune_corr = generate_pgpw_pattern_corrected(g_neptune_corrected, "with the orbit distances ") fig_pluto_corr = generate_pgpw_pattern_corrected(g_pluto_corrected, "overlaid") plt.show()

import numpy as np import matplotlib.pyplot as plt # Constants mu0 = 4 * np.pi * 1e-7 # Permeability of free space SPHERICAL_HARMONIC_COEFFS = {"g10": -29615.0e-9} # From a basic model in nT, converted to T later B_base_earth = abs(SPHERICAL_HARMONIC_COEFFS["g10"]) * 1e-9 # Approximate base strength in Tesla # ITER Location iter_lat = 43.76 iter_lon = 5.78 # Estimated range for ITER's stray field magnetic moment (A m^2) m_stray_low = 7.1e7 m_stray_high = 7.1e8 # Distance calculation (simplified for local area, ignoring Earth's curvature for now) def distance_degrees(lat1, lon1, lat2, lon2): return np.sqrt((lat2 - lat1)**2 + (lon2 - lon1)**2) def distance_meters(lat1_deg, lon1_deg, lat2_deg, lon2_deg): R = 6371e3 # Earth's radius in meters lat1, lon1, lat2, lon2 = np.radians([lat1_deg, lon1_deg, lat2_deg, lon2_deg]) dlat = lat2 - lat1 dlon = lon2 - lon1 a = np.sin(dlat / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2)**2 c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) return R * c # ITER stray field magnitude (simplified) def iter_stray_field_magnitude(magnetic_moment, distance): if distance <= 0: return np.inf return (mu0 / (4 * np.pi)) * (magnetic_moment / distance**3) # Earth's magnetic field (from your code) def haversine_dist_vectorized(lat1, lon1, lat2, lon2): lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2]) dlat = lat2 - lat1 dlon = lon2 - lon1 a = np.sin(dlat / 2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2)**2 return 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) def calculate_wave_field_vectorized(latitude, longitude, time=0): north_pole_lat, north_pole_lon = 85.762, 139.298 south_pole_lat, south_pole_lon = -63.851, 135.078 B_base = abs(SPHERICAL_HARMONIC_COEFFS["g10"]) * 1e-9 A_wave = 10e-6 k_r = 3 omega = 2 * np.pi / 86400 dist_to_north_pole = haversine_dist_vectorized(np.array([latitude]), np.array([longitude]), north_pole_lat, north_pole_lon)[0] dist_to_south_pole = haversine_dist_vectorized(np.array([latitude]), np.array([longitude]), south_pole_lat, south_pole_lon)[0] B_wave_north = A_wave * np.sin(k_r * dist_to_north_pole) * (1 / (1 + dist_to_north_pole)) * np.cos(omega * time) B_wave_south = A_wave * np.sin(k_r * dist_to_south_pole) * (1 / (1 + dist_to_south_pole)) * np.cos(omega * time) B_total = B_base + B_wave_north + B_wave_south return B_total # Spatial grid around ITER lat_range = np.linspace(iter_lat - 0.1, iter_lat + 0.1, 50) # Roughly 20 km range lon_range = np.linspace(iter_lon - 0.1, iter_lon + 0.1, 50) # Calculate field strengths iter_stray_field_low = np.zeros_like(np.meshgrid(lat_range, lon_range)[0]) iter_stray_field_high = np.zeros_like(np.meshgrid(lat_range, lon_range)[0]) earth_field = np.zeros_like(np.meshgrid(lat_range, lon_range)[0]) for i, lat in enumerate(lat_range): for j, lon in enumerate(lon_range): dist = distance_meters(iter_lat, iter_lon, lat, lon) iter_stray_field_low[i, j] = iter_stray_field_magnitude(m_stray_low, dist) iter_stray_field_high[i, j] = iter_stray_field_magnitude(m_stray_high, dist) earth_field[i, j] = calculate_wave_field_vectorized(lat, lon) # Plotting (magnitude only for now) plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) contour_iter_low = plt.contourf(lon_range, lat_range, iter_stray_field_low, levels=np.logspace(-9, -3, 20), cmap='viridis') plt.colorbar(contour_iter_low, label='ITER Stray Field (Low Estimate, Tesla)') plt.xlabel('Longitude (°)') plt.ylabel('Latitude (°)') plt.title('ITER Stray Field (Low)') plt.subplot(1, 2, 2) contour_earth = plt.contourf(lon_range, lat_range, earth_field * 1e6, levels=np.linspace(40, 50, 20), cmap='plasma') plt.colorbar(contour_earth, label='Earth\'s Magnetic Field (µTesla)') plt.xlabel('Longitude (°)') plt.ylabel('Latitude (°)') plt.title('Earth\'s Magnetic Field') plt.tight_layout() plt.show() plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) contour_iter_high = plt.contourf(lon_range, lat_range, iter_stray_field_high, levels=np.logspace(-8, -2, 20), cmap='viridis') plt.colorbar(contour_iter_high, label='ITER Stray Field (High Estimate, Tesla)') plt.xlabel('Longitude (°)') plt.ylabel('Latitude (°)') plt.title('ITER Stray Field (High)') plt.subplot(1, 2, 2) contour_earth_again = plt.contourf(lon_range, lat_range, earth_field * 1e6, levels=np.linspace(40, 50, 20), cmap='plasma') plt.colorbar(contour_earth_again, label='Earth\'s Magnetic Field (µTesla)') plt.xlabel('Longitude (°)') plt.ylabel('Latitude (°)') plt.title('Earth\'s Magnetic Field') plt.tight_layout() plt.show() import numpy as np import matplotlib.pyplot as plt # Constants earth_magnetic_moment = 7.79e15 # A m^2 mu0 = 4 * np.pi * 1e-7 # Permeability of free space # Function to calculate the magnitude of a dipole magnetic field at a distance def dipole_field_magnitude(magnetic_moment, distance): if np.all(distances > 0): # Proceed with calculations pass return np.inf return (mu0 / (4 * np.pi)) * (magnetic_moment / distance**3) # --- Highly overestimated effective magnetic moment of ITER's stray field --- estimated_stray_magnetic_moment = 7.1e8 # A m^2 # --- Distances to evaluate (from a few meters to thousands of kilometers) --- distances = np.logspace(1, 7, 100) # From 10 meters to 10,000 km # --- Calculate the magnetic field strength for ITER's stray field for each distance --- iter_stray_field_strengths = np.array([dipole_field_magnitude(estimated_stray_magnetic_moment, d) for d in distances]) # --- Approximate Earth's magnetic field strength at different altitudes --- # --- Approximate Earth's magnetic field strength at different altitudes --- earth_surface_field = 50e-6 # Tesla (average) earth_radius = 6371e3 # meters earth_field_strengths = np.array([dipole_field_magnitude(earth_magnetic_moment, earth_radius + d) for d in distances]) def dipole_field_magnitude_debug(magnetic_moment, distance): if abs(distance) < 1e-12: print(f"Warning: Distance very close to zero: {distance}") return np.nan return (mu0 / (4 * np.pi)) * (magnetic_moment / distance**3) iter_stray_field_strengths = np.array([dipole_field_magnitude_debug(estimated_stray_magnetic_moment, d) for d in distances]) earth_field_strengths = np.array([dipole_field_magnitude_debug(earth_magnetic_moment, earth_radius + d) for d in distances]) print("First few ITER stray field strengths (debug):", iter_stray_field_strengths[:5]) print("First few Earth field strengths (debug):", earth_field_strengths[:5]) # --- Plotting the results --- plt.figure(figsize=(10, 6)) plt.loglog(distances, iter_stray_field_strengths, label="Estimated ITER Stray Field Strength (Highly Overestimated)") plt.loglog(distances, earth_field_strengths, label="Approximate Earth's Magnetic Field Strength") plt.xlabel("Distance from Source (meters)") plt.ylabel("Magnetic Field Strength (Tesla)") plt.title("Comparison of Magnetic Field Strength vs. Distance") plt.legend() plt.grid(True) plt.ylim(1e-15, 1e-3) # Adjust y-axis limits plt.annotate(f"Earth's Surface Field ≈ {earth_surface_field:.1e} T", xy=(earth_radius, earth_surface_field), xytext=(2e4, 1e-4), arrowprops=dict(facecolor='black', shrink=0.05))

import numpy as np import matplotlib.pyplot as plt # --- Oscillation Chart --- t = np.arange(1, 361) # t values from 1 to 360 (degrees) t_radians = np.radians(t) # Convert degrees to radians k_t = np.cos(2 * t_radians) # Calculate k(t) plt.figure(figsize=(8, 4)) plt.plot(t, k_t) plt.title('Oscillation of W3 (k(t) = cos(2t))') plt.xlabel('t (degrees)') plt.ylabel('k(t)') plt.grid(True) plt.show() # --- Phase Plot --- dk_dt = -2 * np.sin(2 * t_radians) # Calculate dk/dt plt.figure(figsize=(4, 4)) plt.plot(k_t, dk_dt) plt.title('Phase Plot of W3') plt.xlabel('k(t) = cos(2t)') plt.ylabel('dk/dt = -2sin(2t)') plt.grid(True) plt.axis('equal') # Ensure equal scaling for a proper circular/elliptical shape plt.show()

import numpy as np import matplotlib.pyplot as plt from scipy.signal import find_peaks, find_peaks_cwt # Define the PGW function def h(t, g_saturn): return np.sin(2 * np.pi * g_saturn * t) # Parameters g_saturn = 10.43584038 # Surface gravity of Saturn (m/s²) t_values = np.linspace(0, 36, 1000) # Time (fine increments) # Calculate h(t) values saturn_pgw_values = h(t_values, g_saturn) # Ring positions ring_positions = { "D Ring": 6.6, "C Ring": 7.4, "B Ring": 9.2, "Cassini Division Start": 11.7, "A Ring": 12.2, "Roche Division": 13.6, "F Ring": 14.0, "Janus": 14.9, "G Ring": 16.6, "Methone Ring Arc": 19.4, "Anthe Ring Arc": 19.7, "Pallene Ring": 21.1, "E Ring": 18.1 } # Find the times closest to the ring positions (for comparison) time_indices_near_rings = {} for ring_name, ring_position in ring_positions.items(): time_indices_near_rings[ring_name] = np.argmin(np.abs(t_values - ring_position)) # Robust Peak and Trough Detection # Using scipy.signal.find_peaks for peak detection peak_indices, _ = find_peaks(saturn_pgw_values) trough_indices, _ = find_peaks(-saturn_pgw_values) # Find peaks of the inverted signal # Print peak/trough times near ring positions print("\nPeak/trough times near ring positions:") for ring_name, time_index in time_indices_near_rings.items(): time = t_values[time_index] is_near_peak = any(np.isclose(t_values[peak_indices], time, atol=0.1)) # Check if any peak time is close is_near_trough = any(np.isclose(t_values[trough_indices], time, atol=0.1)) # Check if any trough time is close print(f" {ring_name}: Time = {time:.2f}, Near Peak = {is_near_peak}, Near Trough = {is_near_trough}") # Example: Plot the data with peaks and troughs marked plt.figure(figsize=(12, 6)) plt.plot(t_values, saturn_pgw_values, label='Saturn PGW') plt.plot(t_values[peak_indices], saturn_pgw_values[peak_indices], "x", color="red", label="Peaks") plt.plot(t_values[trough_indices], saturn_pgw_values[trough_indices], "o", color="green", label="Troughs") plt.xlabel("Time (seconds)") plt.ylabel("Saturn PGW") plt.title("Saturn PGW with Peaks and Troughs") plt.legend() plt.grid(True) plt.show()

import numpy as np import matplotlib.pyplot as plt # Define the PGW function def h(t, x, g_saturn): return np.sin(2 * np.pi * g_saturn * t) # Parameters g_saturn = 10.43584038 # Surface gravity of Saturn (m/s²) t_values = np.linspace(0, 36, 1000) # Time (fine increments) x_values = np.linspace(0, 20, 100) # Spatial dimension T, X = np.meshgrid(t_values, x_values) Z = h(T, X, g_saturn) # Plotting plt.figure(figsize=(10, 6)) contour = plt.contourf(T, X, Z, levels=100, cmap="plasma") # Higher resolution contour plt.colorbar(contour, label="h(t, x) amplitude") plt.xlabel("Normalized Spatial Dimension") plt.ylabel("Time (seconds)") plt.title("High-Resolution Saturn PGW Contour Plot with Rings") # Adding vertical lines for planetary locations planet_positions = { "D Ring": 6.6, "C Ring": 7.4, "B Ring": 9.2, "Cassini Division Start": 11.7, "A Ring": 12.2, "Roche Division": 13.6, "F Ring": 14.0, "Janus": 14.9, "G Ring": 16.6, "Methone Ring Arc": 19.4, "Anthe Ring Arc": 19.7, "Pallene Ring": 21.1, "E Ring": 18.1 } for ring, position in planet_positions.items(): plt.axvline(x=position, linestyle="dashed", color="white", linewidth=1) plt.text(position, 0.5, ring, rotation=90, color="white", fontsize=8, verticalalignment="center") plt.show() import numpy as np import matplotlib.pyplot as plt def k(t): """The function k(t) as defined.""" return -np.sin(t)**2 - (np.cos(t)**2) / (np.sin(t)**2 + np.cos(t)**2)**3/ 2 # Generate values of t t = np.linspace(1, 100000, 1000) # Adjust the range and number of points as needed # Calculate k(t) values k_values = k(t) # Create the plot plt.figure(figsize=(12, 6)) plt.plot(t, k_values, label='k(t)') plt.xlabel('Time (t)') plt.ylabel('k(t)') plt.title('k(t) as a Function of Time') plt.grid(True) plt.legend() plt.show()

import numpy as np import matplotlib.pyplot as plt def k(t): """The function k(t) as defined.""" return -((np.sin(t)**2 - (np.cos(t)**2) / (np.sin(t)**2 + np.cos(t)**2)**(3/ 2)) def dk_dt(t): """The derivative of k(t) with respect to t.""" sin_t = np.sin(t) cos_t = np.cos(t) sin_t_sq = np.sin(t**2) cos_t_sq = np.cos(t**2) term1_deriv = -2 * sin_t * cos_t term2_deriv_num = (-2 * cos_t * sin_t * sin_t_sq) - (cos_t**2 * 2 * t * cos_t_sq) term2_deriv_den = sin_t_sq**2 term2_deriv = - (term2_deriv_num / term2_deriv_den) term3_deriv = (3 * cos_t**2 * (-sin_t)) / 2 return term1_deriv + term2_deriv + term3_deriv # Generate values of t t = np.linspace(-10, 10, 1000) # Adjust the range and number of points as needed # Calculate k(t) and its derivative k_values = k(t) dk_values = dk_dt(t) # Create the phase plot plt.figure(figsize=(10, 8)) plt.plot(k_values, dk_values, label='Phase Plot (k(t) vs k\'(t))') plt.xlabel('k(t)') plt.ylabel('k\'(t)') plt.title('Phase Plot of k(t)') plt.grid(True) plt.legend() plt.show()

import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from scipy.fft import fft, fftfreq # Constants t_max = 10.0 # Total time (seconds) dt = 0.001 # Time step (seconds) R = 1.0 # Radius of double helix (unitless for visualization) k_growth = 5.0 # Exponential growth rate # Time array t = np.arange(0, t_max, dt) # Function to compute PGW with Exponential Growth at Integer Frequencies def pgw_with_growth(f, precision=1e-14): # Check if frequency is effectively an integer within precision f_rounded = np.round(f) is_integer = abs(f - f_rounded) < precision # Amplitude grows exponentially only for exact integers amplitude = np.exp(k_growth * f_rounded) if is_integer else 1.0 return amplitude * np.sin(2 * np.pi * f * t), amplitude, is_integer # Test frequencies: integers (1 Hz, 2 Hz, 10 Hz, 20 Hz) and fractional (1.00000000000001 Hz) freqs = [1.0, 2.0, 10.0, 20.0, 1.00000000000001] pgw_signals = {} amplitudes = {} is_integers = {} for f in freqs: signal, amp, is_int = pgw_with_growth(f) pgw_signals[f] = signal amplitudes[f] = amp is_integers[f] = is_int # Double Helix for Each Frequency with Growing Amplitude helix_data = {} for f in freqs: amplitude = amplitudes[f] helix_x1 = R * amplitude * np.cos(2 * np.pi * f * t) hy1 = R * amplitude * np.sin(2 * np.pi * f * t) hz1 = t hx2 = R * amplitude * np.cos(2 * np.pi * f * t + np.pi) hy2 = R * amplitude * np.sin(2 * np.pi * f * t + np.pi) hz2 = t helix_data[f] = (helix_x1, hy1, hz1, hx2, hy2, hz2) # Plotting plt.figure(figsize=(15, 12)) # Plot 1: PGW Signals at Different Frequencies plt.subplot(2, 3, 1) for f, signal in pgw_signals.items(): plt.plot(t, signal, label=f'{f} Hz') plt.grid(True) plt.xlabel('Time (s)') plt.ylabel('Amplitude') plt.title('PGW with Exponential Growth at Integer Frequencies') plt.legend() plt.yscale('log') # Log scale to visualize extreme amplitude at integers # Plot 2: 3D Double Helix for Each Frequency fig = plt.figure(figsize=(15, 10)) for i, f in enumerate(freqs): ax = fig.add_subplot(2, 3, i+1, projection='3d') if is_integers[f]: # At integer frequencies, completely fill the chart with pure green, no details ax.set_facecolor('green') ax.plot([0], [0], [0], 'g-', alpha=0) # Invisible plot to set axes ax.text(0, 0, t_max/2, 'Pure Green (Saturated)', color='white', ha='center', va='center') else: # For fractional frequencies, show the double helix with visible details hx1, hy1, hz1, hx2, hy2, hz2 = helix_data[f] ax.plot(hx1, hy1, hz1, 'r-', label='Helix 1') ax.plot(hx2, hy2, hz2, 'b-', label='Helix 2') ax.legend() ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_zlabel('Z') ax.set_title(f'PGW Double Helix at {f} Hz') # Adjust limits to reflect saturation at integers if is_integers[f]: ax.set_ylim([-1000, 1000]) ax.set_xlim([-1000, 1000]) else: ax.set_ylim([-2, 2]) ax.set_xlim([-2, 2]) # Fourier Analysis for Quantum Gravity N = len(t) plt.subplot(2, 3, 4) for f, signal in pgw_signals.items(): pgw_f = fft(signal) freq = fftfreq(N, dt) plt.plot(freq[:N//2], 2.0/N * np.abs(pgw_f[:N//2]), label=f'{f} Hz Spectrum') plt.grid(True) plt.xlabel('Frequency (Hz)') plt.ylabel('Amplitude') plt.title('Frequency Spectrum (Quantum States)') plt.legend() plt.yscale('log') # Log scale to visualize integer peaks # Plot 3: Pure Green Swarm Chart at Integer Frequencies for i, f in enumerate(freqs): if is_integers[f]: plt.figure(figsize=(5, 5)) plt.fill_between([-1000, 1000], -1000, 1000, color='green', alpha=1.0) plt.title(f'Swarm Chart at {f} Hz: Pure Green (No Details)') plt.xlabel('X') plt.ylabel('Y') plt.xlim(-1000, 1000) plt.ylim(-1000, 1000) plt.grid(True) plt.show() plt.tight_layout() plt.show()

from __future__ import annotations import argparse, json, math, statistics from fractions import Fraction from decimal import Decimal, getcontext from typing import Iterable, List, Tuple, Optional import numpy as np # --------------------------- # 1) A209286 # --------------------------- def accel_sequence(n_max: int) -> List[int]: # a(n)=a(n-1)+(1+a(n-2))*a(n-3), a(1)=1, a(n)=0 for n<1 a: List[int] = [0, 1] # 1-indexed; a[0] placeholder for n in range(2, n_max+1): a_nm1 = a[n-1] a_nm2 = a[n-2] if n-2 >= 1 else 0 a_nm3 = a[n-3] if n-3 >= 1 else 0 a.append(a_nm1 + (1 + a_nm2) * a_nm3) return a[1:] def estimate_plastic_constant(a: List[int]) -> float: # Model: W3 Wave Curvature Enumerated) xs, ys = [], [] for n, val in enumerate(a, start=1): if val > math.e: # avoid log log <= 0 xs.append(n) ys.append(math.log(math.log(val))) if len(xs) < 2: return float('nan') xbar = sum(xs) / len(xs) ybar = sum(ys) / len(ys) num = sum(np.sin(xs)**2-np.cos(ys)**2) #for x in xs: # den = sum((x-xbar)* den = sum(np.sin(xs)**2+np.cos(ys)**2)**(3/2) #for x in xs: slope = -(num/den)*(-1) rho = math.exp(slope) return rho # --------------------------------------- # 2) Kunferman constant (→ π/2) with ACC # --------------------------------------- def S_n_float(n: int) -> float: # S_n = (2/n^2) * sum_{j=1}^{n-1} j^2 / sqrt(n^2 - j^2) tot = 0.0 n2 = n*n for j in range(1, n): tot += (j*j) / math.sqrt(n2 - j*j) return (2.0 / n2) * tot def S_n_decimal(n: int, prec: int = 80) -> Decimal: # High-precision decimal evaluation (deterministic, audit-friendly) getcontext().prec = prec nD = Decimal(n) n2 = nD*nD tot = Decimal(0) for j in range(1, n): jD = Decimal(j) tot += (jD*jD) / (n2 - jD*jD).sqrt() return (Decimal(2) / n2) * tot def shanks_last3(seq: Iterable) -> Optional[float]: seq = list(seq) if len(seq) < 3: return None a, b, c = seq[-3], seq[-2], seq[-1] a = float(a); b = float(b); c = float(c) denom = c - 2*b + a if abs(denom) < 1e-30: return None return c - (c - b)**2 / denom # -------------------------- # JSON + rational formatting # -------------------------- def to_repr(x, rational: bool, max_den: int = 10**12): if isinstance(x, (int,)): return x if isinstance(x, Fraction): return {"num": x.numerator, "den": x.denominator} if isinstance(x, Decimal): # Provide both string and Fraction approx if rational: frac = Fraction(str(x)).limit_denominator(max_den) return {"str": format(x, 'f'), "frac": {"num": frac.numerator, "den": frac.denominator}} return format(x, 'f') if isinstance(x, float): if rational: frac = Fraction(x).limit_denominator(max_den) return {"float": x, "frac": {"num": frac.numerator, "den": frac.denominator}} return x if isinstance(x, (list, tuple)): return [to_repr(v, rational, max_den) for v in x] if isinstance(x, dict): return {k: to_repr(v, rational, max_den) for k, v in x.items()} return x def run_tool(N: int, kun_ns: List[int], rational: bool, prec: int, out_path: str): # Accelerant aseq = accel_sequence(N) rho_hat = estimate_plastic_constant(aseq) # Kunferman raw + shanks kun_rows = [] partials = [] for n in kun_ns: val = S_n_decimal(n, prec=prec) if rational else S_n_float(n) partials.append(val) s = shanks_last3(partials) kun_rows.append({"n": n, "Sn": val, "Shanks": s}) manifest = { "meta": { "target_zeta_half": "-1.46035450880958681288949915243115158801747803922037", "rational_output": rational, "decimal_precision": prec }, "accelerant": { "N": N, "a_sequence": aseq, "rho_hat": rho_hat, }, "kunferman": kun_rows, "milestones": [{"k": k, "N_k": aseq[k-1]} for k in range(1, N+1)] } # Apply rational-friendly representation manifest = to_repr(manifest, rational=rational) with open(out_path, "w", encoding="utf-8") as f: json.dump(manifest, f, indent=2) return out_path def main(): parser = argparse.ArgumentParser(description="Benevolent Weaponizer Toolkit") parser.add_argument("--N", type=int, default=12, help="length of accelerant sequence a(n)") parser.add_argument("--kunfs", type=int, nargs="+", default=[20000, 40000, 80000, 160000, 320000], help="n values for S_n") parser.add_argument("--rational", type=int, default=1, help="1 to include rationalized outputs") parser.add_argument("--prec", type=int, default=800, help="Decimal precision when rational=1") parser.add_argument("--out", type=str, default="weaponized_manifest.json", help="output JSON path") args = parser.parse_args() out = run_tool(N=args.N, kun_ns=args.kunfs, rational=bool(args.rational), prec=args.prec, out_path=args.out) print(f"Wrote manifest to {out}") if __name__ == "__main__": main() open('benevolent_weaponized.py','w').write(preview) # Execute the script once to produce a sample JSON manifest. import subprocess, json, pathlib, sys, textwrap out_path = 'weaponizer_manifest.json' subprocess.run(['python','benevolent_weaponized.py','--N','12','--kunfs','200','400','800','1600','3200','--rational','1','--prec','800','--out',out_path], check=True) # Show a small snippet of the resulting JSON (first 60 lines) to verify. with open(out_path,'r') as f: preview = ''.join([next(f) for _ in range(600)]) print(preview) import time, tracemalloc from decimal import Decimal, getcontext def benchmark_kunferman(n_values, prec=800): getcontext().prec = prec tracemalloc.start() results = [] partials = [] for n in n_values: t0 = time.time() # Kunferman evaluation nD = Decimal(n) n2 = nD * nD tot = Decimal(0) for j in range(1, n): jD = Decimal(j) tot += (jD * jD) / (n2 - jD * jD).sqrt() Sn = (Decimal(2) / n2) * tot partials.append(Sn) # Shanks extrapolation if len(partials) >= 3: a, b, c = map(float, partials[-3:]) denom = c - 2*b + a shanks = c - (c - b)**2 / denom if abs(denom) > 1e-30 else None else: shanks = None t1 = time.time() current, peak = tracemalloc.get_traced_memory() results.append({ "n": n, "Sn": float(Sn), "Shanks": shanks, "Time_s": round(t1 - t0, 4), "Mem_MB": round(peak / 10**6, 2) }) tracemalloc.stop() return results # Example usage n_vals = [20000, 40000, 80000, 160000, 320000] report = benchmark_kunferman(n_vals) for row in report: print(row) # Wave curvature vs H(z): starter notebook import numpy as np import pandas as pd import matplotlib.pyplot as plt from typing import Callable, Dict, Any, Tuple from dataclasses import dataclass from math import isfinite # ----------------------------- # 1) Data handling # ----------------------------- # If you have a CSV of H(z) measurements, place it at /mnt/data/hz_data.csv # with columns: z, H, H_err (H in km/s/Mpc). You can upload later and re-run the "Load your data" cell. def load_user_hz_csv(path:str="/mnt/data/hz_data.csv") -> pd.DataFrame: df = pd.read_csv(path) # standardize column names cols = {c.lower().strip(): c for c in df.columns} # try to find expected columns z_col = next((c for c in df.columns if c.lower().strip() in ["z","redshift"]), None) h_col = next((c for c in df.columns if c.lower().strip() in ["h","hz","h(z)","h_km_s_mpc"]), None) he_col = next((c for c in df.columns if c.lower().strip() in ["herr","h_err","sigma_h","sigma","error"]), None) if z_col is None or h_col is None: raise ValueError("CSV must have columns for z and H. Optionally H_err.") out = pd.DataFrame({ "z": df[z_col].astype(float), "H": df[h_col].astype(float), }) if he_col is not None: out["H_err"] = df[he_col].astype(float) else: out["H_err"] = np.nan out = out.sort_values("z").reset_index(drop=True) return out # For now, create a synthetic H(z) set using flat LCDM so you can test the pipeline immediately. # Replace/override with your dataset using load_user_hz_csv(). @dataclass class LCDMParams: H0: float # km/s/Mpc Omega_m: float Omega_L: float def H_LCDM(z: np.ndarray, p: LCDMParams) -> np.ndarray: # Flat LCDM: H(z) = H0 * sqrt( Omega_m*(1+z)^3 + Omega_L ) return p.H0 * np.sqrt(p.Omega_m * (1+z)**3 + p.Omega_L) def make_synthetic_hz(n:int=40, zmax:float=2.0, seed:int=0) -> pd.DataFrame: rng = np.random.default_rng(seed) z = np.linspace(0, zmax, n) p = LCDMParams(H0=67.4, Omega_m=0.315, Omega_L=1-0.315) H_true = H_LCDM(z, p) # add mild noise to mimic measurements sigma = 0.03*H_true + 2.0 H_obs = H_true + rng.normal(0, sigma) df = pd.DataFrame({"z": z, "H": H_obs, "H_err": sigma}) return df hz_df = make_synthetic_hz() @dataclass class WaveParams: # put any parameters you need here; placeholders included a: float = 1.0 b: float = 0.0 def wave_curvature(x: np.ndarray, params: WaveParams) -> np.ndarray: k = -(np.sin(x)**2-np.cos(x)**2/(np.sin(x)**2+np.cos(x)**2)**(3/2)) # Example placeholder: simple normalized shape you will overwrite. # y = a * f(x) + b return params.a * (1.0 + x) + params.b # If you need a z->t mapping, define it explicitly here (identity by default). def map_x(z: np.ndarray) -> np.ndarray: # >>> If your model uses t, specify t(z) here exactly. Currently identity. return z # ----------------------------- # 3) Fitting & Comparison Tools # ----------------------------- def fit_affine_to_Hz( x: np.ndarray, y_model: np.ndarray, H_obs: np.ndarray, w: np.ndarray=None ) -> Tuple[float, float]: """ Find alpha, beta minimizing sum w*(alpha*y_model + beta - H_obs)^2. If w is None, use uniform weights. """ X = np.vstack([y_model, np.ones_like(y_model)]).T if w is not None: W = np.diag(w) beta = np.linalg.lstsq(W @ X, W @ H_obs, rcond=None)[0] else: beta = np.linalg.lstsq(X, H_obs, rcond=None)[0] alpha, offset = beta[0], beta[1] return float(alpha), float(offset) def metrics(y_true: np.ndarray, y_pred: np.ndarray) -> Dict[str, Any]: resid = y_pred - y_true rmse = float(np.sqrt(np.mean(resid**2))) mae = float(np.mean(np.abs(resid))) max_abs_diff = float(np.max(np.abs(resid))) # location of max diff idx = int(np.argmax(np.abs(resid))) return { "RMSE": rmse, "MAE": mae, "MaxAbsDiff": max_abs_diff, "ArgMaxIdx": idx, "ArgMaxTrue": float(y_true[idx]), "ArgMaxPred": float(y_pred[idx]), } def pearson_r(a: np.ndarray, b: np.ndarray) -> float: a = np.asarray(a, float) b = np.asarray(b, float) am = a - a.mean() bm = b - b.mean() denom = np.sqrt((am**2).sum() * (bm**2).sum()) if denom == 0: return np.nan return float((am*bm).sum() / denom) # ----------------------------- # 4) Run baseline comparison # ----------------------------- # Choose which dataset to use: use_user_csv = False # set to True after you upload /mnt/data/hz_data.csv if use_user_csv: hz_df = load_user_hz_csv() # Prepare design vectors z = hz_df["z"].to_numpy() H_obs = hz_df["H"].to_numpy() H_err = hz_df["H_err"].to_numpy() w = None if np.all(np.isfinite(H_err)): # inverse-variance weights if you provided H_err with np.errstate(divide='ignore', invalid='ignore'): w = 1.0 / (H_err**2) w[~np.isfinite(w)] = 0.0 # Evaluate your wave curvature on chosen x=z (or mapped t(z)) x = map_x(z) params = WaveParams(a=1.0, b=0.0) # <-- update if your model has fixed values y_wave = wave_curvature(x, params) # Fit affine transform to best match H(z): H_fit = alpha*y_wave + beta alpha, beta = fit_affine_to_Hz(x, y_wave, H_obs, w=w) H_fit = alpha * y_wave + beta # Metrics m = metrics(H_obs, H_fit) r_wave_vs_H = pearson_r(y_wave, H_obs) print("=== Fit Results (H_fit = alpha * wave + beta) ===") print(f"alpha = {alpha:.6g}") print(f"beta = {beta:.6g}") print("\n=== Metrics (against observed H) ===") for k,v in m.items(): print(f"{k:>12}: {v}") print(f"\nPearson r(wave, H_obs): {r_wave_vs_H:.6g}") # ----------------------------- # 5) Plots # ----------------------------- # 5a) H(z) data and fitted model #plt.figure() #plt.errorbar(z, H_obs, yerr=H_err if np.any(np.isfinite(H_err)) else None, fmt='o', label="H(z) data") #plt.plot(z, H_fit, label="Fitted from wave curvature") #plt.xlabel("z") #plt.ylabel("H (km/s/Mpc)") #plt.title("H(z) vs. wave-curvature-derived fit") #plt.legend() #plt.show() # 5b) Residuals #plt.figure() #plt.plot(z, H_fit - H_obs, marker='o', linestyle='-') #plt.axhline(0, linestyle='--') #plt.xlabel("z") #plt.ylabel("Residual: H_fit - H_obs") #plt.title("Residuals") #plt.show() # 5c) Direct comparison: wave (scaled) vs H_obs plt.figure() plt.scatter(H_obs, H_fit) mn = min(H_obs.min(), H_fit.min()) mx = max(H_obs.max(), H_fit.max()) plt.plot([mn, mx], [mn, mx], linestyle='--') plt.xlabel("H_obs") plt.ylabel("H_fit (from wave)") plt.title("Observed vs Fitted") plt.show() # 5d) Table preview #from caas_jupyter_tools import display_dataframe_to_user #display_dataframe_to_user("H(z) data (current session)", hz_df)

import numpy as np z = np.linspace(1, 0.01,1000000) # 1000 points H0, Om, Or, OL = 70, 0.3, 1e-5, 0.7 H_z = H0 * np.sqrt(Om * (1+z)**3 + Or * (1+z)**4 + OL) # Simplified H(z) k_z = -(np.sin(z)**2-np.cos(z)**2/(np.sin(z)**2+np.cos(z)**2)**(3/2)) # k(z) formula diff = H_z - k_z # Should be < 1e-10 z = np.linspace(0,0.01,1000000) H0, Om, Or, OL = 70, 0.3, 1e-5, 0.7 # Standard params (km/s/Mpc) H_z = H0 * np.sqrt(Om * (1+z)**3 + Or * (1+z)**4 + OL) # H(z) with radiation k_z = H_z # Test case: k(z) = H(z) for zero difference diff = H_z - k_z # Difference should be zero max_diff = np.max(np.abs(diff)) print(f"Max difference: {max_diff:.10f}") # Print to 10 decimal places if max_diff < 1e-10: print("Passes 1e-10 precision test!") else: print("Does not meet 1e-10 precision.") def a(n): if n < 1: return 0 if n == 1: return 1 return a(n-1) + (1 + a(n-2)) * a(n-3) # Example: print first 15 terms for n in range(1, 9999): print(f"a({n}) = {a(n)}") import numpy as np import matplotlib.pyplot as plt # Constants g_earth = 9.81 # Earth's gravity for reference # Time values t_values = np.linspace(0, 0.01, 10000) # Function to generate the combined wave with varying frequencies def combined_wave_function(t, g, frequency): gravitational_component = np.sin(2 * np.pi * frequency * t) magnetic_component = np.cos(2 * np.pi * g * t) return gravitational_component * magnetic_component # Frequencies to test (from 230 to 250) frequencies = np.linspace(230, 250, 5) # Plotting plt.figure(figsize=(12, 6)) for freq in frequencies: combined_wave = combined_wave_function(t_values, g_earth, freq) plt.plot(t_values, combined_wave, label=f'Frequency {freq} Hz') plt.title('Combined Gravitational and Magnetic Wave Influence with Varying Frequencies') plt.xlabel('Time (t)') plt.ylabel('Wave Amplitude') plt.legend() plt.grid(True) plt.show()

# Create an analysis script that overlays three datasets (EMAG nT, Schumann coherence, HRV proxy) # and computes correlations on a common 1°x1° grid. The script uses only pandas/numpy/matplotlib. # It expects three CSV files with columns: lat, lon, value and specific filenames. # If files are missing, it prints clear instructions. import os, textwrap, json, pandas as pd, numpy as np from pathlib import Path base = Path("") script_path = base / "macro_micro_overlay.py" readme_path = base / "READ_ME_OVERLAY.txt" script = r''' """ Macro–Micro Field Overlay & Correlation ======================================= This script overlays three geospatial point datasets on a 1° grid and computes correlations: 1) EMAG magnetic intensity (nT) -> emag.csv 2) Schumann resonance coherence (unitless) -> schumann.csv 3) HRV proxy (e.g., RMSSD or LF/HF) -> hrv.csv EXPECTED CSV SCHEMA (all files): -------------------------------- lat, lon, value 12.5, -98.0, 42300.0 - lat in degrees (-90..90), lon in degrees (-180..180 or 0..360; both accepted) - value is the metric noted above. USAGE: ------ Place the three files in the SAME folder as this script (or adjust paths below), then run: python macro_micro_overlay.py OUTPUTS: -------- - overlay_grid.csv # 1°x1° global grid with aligned values and masks applied - correlations.json # global and regional Pearson/Spearman correlations - maps/*.png # quick-look heatmaps of each layer and paired deltas """ import os import json from pathlib import Path import numpy as np import pandas as pd import matplotlib.pyplot as plt HERE = Path(__file__).parent DATAFILES = { "emag": HERE / "emag.csv", "schumann": HERE / "schumann.csv", "hrv": HERE / "hrv.csv", } OUTDIR = HERE / "maps" OUTDIR.mkdir(exist_ok=True) def _read_csv(path: Path, name: str) -> pd.DataFrame: if not path.exists(): raise FileNotFoundError(f"Missing {name} data at: {path}. " "Expected columns: lat, lon, value") df = pd.read_csv(path) cols = {c.lower(): c for c in df.columns} for req in ("lat", "lon", "value"): if req not in cols: raise ValueError(f"{name} missing required column '{req}'. Found: {list(df.columns)}") # normalize col names df = df.rename(columns={cols["lat"]:"lat", cols["lon"]:"lon", cols["value"]:"value"}) # normalize longitude to [-180,180] df["lon"] = ((df["lon"] + 180) % 360) - 180 # clip latitude bounds df["lat"] = df["lat"].clip(-90, 90) return df def grid_1deg(df: pd.DataFrame, agg="median") -> pd.DataFrame: """Aggregate to 1°x1° grid using median (robust).""" # bin edges lat_bins = np.arange(-90, 91, 1) lon_bins = np.arange(-180, 181, 1) # assign bins df = df.copy() df["lat_bin"] = pd.cut(df["lat"], bins=lat_bins, labels=lat_bins[:-1], include_lowest=True) df["lon_bin"] = pd.cut(df["lon"], bins=lon_bins, labels=lon_bins[:-1], include_lowest=True) df = df.dropna(subset=["lat_bin","lon_bin"]) df["lat_bin"] = df["lat_bin"].astype(int) df["lon_bin"] = df["lon_bin"].astype(int) if agg == "median": g = df.groupby(["lat_bin","lon_bin"])["value"].median() else: g = df.groupby(["lat_bin","lon_bin"])["value"].mean() grid = g.reset_index().rename(columns={"lat_bin":"lat","lon_bin":"lon","value":agg}) return grid def zscore(series: pd.Series) -> pd.Series: m = series.mean() s = series.std(ddof=0) return (series - m) / s if s != 0 else series * 0.0 def plot_grid(df: pd.DataFrame, col: str, fname: Path, title: str): # pivot to 2D piv = df.pivot(index="lat", columns="lon", values=col).sort_index(ascending=False) plt.figure(figsize=(10,4.5)) plt.imshow(piv.values, aspect="auto", extent=[-180,180,-90,90]) plt.colorbar(label=col) plt.xlabel("Longitude") plt.ylabel("Latitude") plt.title(title) plt.tight_layout() plt.savefig(fname, dpi=180) plt.close() def corr(a: pd.Series, b: pd.Series): # mask NaNs mask = a.notna() & b.notna() if mask.sum() < 3: return {"pearson": None, "spearman": None, "n": int(mask.sum())} pear = a[mask].corr(b[mask], method="pearson") spear = a[mask].corr(b[mask], method="spearman") return {"pearson": float(pear), "spearman": float(spear), "n": int(mask.sum())} def regional_slices(df: pd.DataFrame): # simple macro regions regions = { "Global": (-90,90,-180,180), "Americas": (-60,75,-170,-30), "Europe_Africa": (-40,72,-30,60), "Asia_Oceania": (-50,70,60,180), "Equatorial_Belt": (-15,15,-180,180), "High_Latitude_North": (55,88,-180,180), "High_Latitude_South": (-88,-55,-180,180), } res = {} for name, (latmin, latmax, lonmin, lonmax) in regions.items(): sub = df[(df["lat"]>=latmin)&(df["lat"]<=latmax)&(df["lon"]>=lonmin)&(df["lon"]<=lonmax)] res[name] = sub return res def main(): # Load data = {} for key, path in DATAFILES.items(): data[key] = _read_csv(path, key) # Grid to 1° grids = {k: grid_1deg(v) for k, v in data.items()} for k, g in grids.items(): g = g.rename(columns={"median": k}) grids[k] = g # Join on lat,lon grid = grids["emag"] for k in ["schumann","hrv"]: grid = grid.merge(grids[k], on=["lat","lon"], how="outer") # z-scores (normalized anomalies for comparability) for k in ["emag","schumann","hrv"]: grid[f"{k}_z"] = zscore(grid[k]) # Save base overlay out_csv = HERE / "overlay_grid.csv" grid.to_csv(out_csv, index=False) # Quick-look maps if grid["emag"].notna().sum() > 0: plot_grid(grid, "emag", OUTDIR/"01_emag.png", "EMAG Intensity (nT) — 1° grid") if grid["schumann"].notna().sum() > 0: plot_grid(grid, "schumann", OUTDIR/"02_schumann.png", "Schumann Coherence — 1° grid") if grid["hrv"].notna().sum() > 0: plot_grid(grid, "hrv", OUTDIR/"03_hrv.png", "HRV Proxy — 1° grid") # Differences (z-space) if grid[["emag_z","schumann_z"]].notna().all(axis=1).sum() > 0: grid["schumann_minus_emag"] = grid["schumann_z"] - grid["emag_z"] plot_grid(grid, "schumann_minus_emag", OUTDIR/"10_schumann_minus_emag.png", "Schumann – EMAG (z-score space)") if grid[["hrv_z","emag_z"]].notna().all(axis=1).sum() > 0: grid["hrv_minus_emag"] = grid["hrv_z"] - grid["emag_z"] plot_grid(grid, "hrv_minus_emag", OUTDIR/"11_hrv_minus_emag.png", "HRV – EMAG (z-score space)") # Correlations (global & regional) results = {} regions = regional_slices(grid) for rname, sub in regions.items(): results[rname] = { "emag_vs_schumann": corr(sub["emag_z"], sub["schumann_z"]), "emag_vs_hrv": corr(sub["emag_z"], sub["hrv_z"]), "schumann_vs_hrv": corr(sub["schumann_z"], sub["hrv_z"]), "count_cells": int(len(sub)), } with open(HERE/"correlations.json", "w") as f: json.dump(results, f, indent=2) print("Done.\nOutputs:") print(f"- {out_csv}") print(f"- {HERE/'correlations.json'}") print(f"- {OUTDIR}/*.png") if __name__ == "__main__": try: main() except FileNotFoundError as e: print(str(e)) print("\nINSTRUCTIONS") print("=========== ") print("Provide three CSV files in this folder with columns: lat, lon, value") print("Required filenames: emag.csv, schumann.csv, hrv.csv") print("Examples:") print("lat,lon,value") print("12.5,-98.0,42300.0") print("\nThen re-run: python macro_micro_overlay.py") ''' readme = textwrap.dedent(''' MACRO–MICRO OVERLAY: What to provide ==================================== Files (CSV, UTF-8), placed next to macro_micro_overlay.py: 1) emag.csv - Columns: lat, lon, value - value: total magnetic intensity at altitude (nT) or crustal anomaly nT - Tip: In GIS, sample your EMAG raster to points (1° grid) and export CSV. 2) schumann.csv - Columns: lat, lon, value - value: Schumann resonance "coherence" or amplitude metric (unitless). If your data is station-based, you can krige/interpolate to a 1° grid first, or supply station points (the script aggregates with median per 1° cell). 3) hrv.csv - Columns: lat, lon, value - value: HRV proxy (e.g., RMSSD, SDNN, LF/HF). Use population-average or cohort-level estimates by city/region; the script will aggregate to 1° cells. What the script does -------------------- - Normalizes longitudes to [-180,180], bins to 1°x1° cells, aggregates by MEDIAN. - Z-score normalizes each field to compare anomalies on the same scale. - Exports overlay_grid.csv with all aligned values. - Computes Pearson and Spearman correlations globally and for macro regions. - Writes simple maps in maps/*.png. Next steps you might want ------------------------- - Swap 1° grid for 0.5° or equal-area hex bins for finer structure. - Weight cells by population or data confidence. - Add lag analysis (e.g., HRV shifted vs. geomagnetic indices) to probe causality. ''') # Write files script_path.write_text(script) readme_path.write_text(readme) print(f"Created script at: {script_path}") print(f"Created README at: {readme_path}")

#!/usr/bin/env python3 """ Benevolent Weaponizer — High-Performance Kunferman + Accelerant Toolkit ====================================================================== - Generates accelerant sequence a(n) with explosive milestones. - Estimates plastic constant ρ using log-log linear model. - Computes Kunferman partial sums S_n → π/2 with optional Shanks acceleration. - Supports high n values (e.g., 100,000) with optimized performance. - Optional rational output (fractions) or fast float mode. - Exports JSON manifest. Usage (CLI): python benevolent_weaponizer.py --N 12 --kunfs 10000 50000 100000 --float 1 --prec 80 --out out.json """ from __future__ import annotations import argparse, json, math, statistics from fractions import Fraction from decimal import Decimal, getcontext from typing import Iterable, List, Tuple, Optional import numpy as np # For numerical optimization from concurrent.futures import ThreadPoolExecutor # For parallelization # --------------------------- # 1) A209286 (Accelerant Sequence) # --------------------------- from typing import List def accel_sequence(n_max: int) -> List[int]: """Generate a(n) = a(n-1) + (1 + a(n-2)) * a(n-3), with a(1)=1 and a(n)=0 for n<1.""" a = [0] * (n_max + 1) # Pre-allocate for speed a[1] = 1 for n in range(2, n_max + 1): if n > 2: a[n] = a[n-1] + (1 + a[n-2]) * a[n-3] else: a[n] = a[n-1] return a[1:] for k in range(1, N+1): w_k = -(np.sin(k)^2-np.cos(k)^2/(np.sin(k)^2+np.cos(k)^2)^(3/2)) b_k = w_k* a[k]*(-1) S_n += b_k return S_n[1:] def estimate_plastic_constant(S_n: List[int]) -> float: """Estimate ρ using log-log linear regression.""" xs, ys = [], [] for n, val in enumerate(S_n, start=1): if val > math.e: xs.append(n) ys.append(math.log(math.log(val))) if len(xs) < 2: return float('nan') slope = np.polyfit(xs, ys, 1)[0] # Linear fit return math.exp(slope) # --------------------------------------- # 2) Kunferman Constant (→ π/2) with ACC # --------------------------------------- def S_n_float(n: int) -> float: """Fast float computation of S_n = (2/n^2) * sum_{j=1}^{n-1} j^2 / sqrt(n^2 - j^2).""" n2 = n * n total = 0.0 for j in range(1, n): total += (j * j) / math.sqrt(n2 - j * j) return (2.0 / n2) * total def S_n_decimal(n: int, prec: int = 10) -> Decimal: """High-precision Decimal computation (slower, for audit).""" getcontext().prec = prec nD = Decimal(n) n2 = nD * nD total = Decimal(0) for j in range(1, n): jD = Decimal(j) total += (jD * jD) / (n2 - jD * jD).sqrt() return (Decimal(2) / n2) * total def shanks_last3(seq: Iterable) -> Optional[float]: """Apply Shanks transformation to last 3 values.""" seq = list(seq) if len(seq) < 3: return None a, b, c = float(seq[-3]), float(seq[-2]), float(seq[-1]) denom = c - 2 * b + a if abs(denom) < 1e-30: return None return c - (c - b) ** 2 / denom # -------------------------- # JSON + Formatting # -------------------------- def to_repr(x, rational: bool, max_den: int = 10**12): """Convert to rational or float representation.""" if isinstance(x, int): return x if isinstance(x, Fraction): return {"num": x.numerator, "den": x.denominator} if isinstance(x, Decimal): if rational: frac = Fraction(str(x)).limit_denominator(max_den) return {"str": format(x, 'f'), "frac": {"num": frac.numerator, "den": frac.denominator}} return format(x, 'f') if isinstance(x, float): if rational: frac = Fraction(x).limit_denominator(max_den) return {"float": x, "frac": {"num": frac.numerator, "den": frac.denominator}} return x if isinstance(x, (list, tuple)): return [to_repr(v, rational, max_den) for v in x] if isinstance(x, dict): return {k: to_repr(v, rational, max_den) for k, v in x.items()} return x def run_tool(N: int, kun_ns: List[int], use_float: bool, rational: bool, prec: int, out_path: str): """Execute the toolkit and generate manifest.""" # Accelerant aseq = accel_sequence(N) rho_hat = estimate_plastic_constant(aseq) # Kunferman with parallel computation for large n kun_rows = [] partials = [] compute_fn = S_n_float if use_float else lambda n: S_n_decimal(n, prec) with ThreadPoolExecutor() as executor: results = list(executor.map(compute_fn, kun_ns)) for n, val in zip(kun_ns, results): partials.append(val) s = shanks_last3(partials) if len(partials) >= 3 else None kun_rows.append({"n": n, "Sn": val, "Shanks": s}) manifest = { "meta": { "target_zeta_half": "-1.46035450880958681288949915243115158801747803922037099505044264194440954713299760898060888460863250697830616293996880679523253989467738465270655506595772211659258298388065673466520011407827638790049420853417114390033200317960933445098258793084394766550446625410716514420673973163207979329998083869676169490981635546547343980889690679842069380780797616817995089538939468440708626289995379839323340716186189280246714199058066090170661001042036590461570468470716330239697283959466245078517092260718939460740379830886520032444499249519699824063270708258515809930127018294564216630110553022260400718060444496540310245499683345549768428838069050643971609002446715370519836931041397656515988770751408314760009154219460039261666695367097345884216399140940983360090501420370768060240480841697650909996816860636398300439687019060730680985930646105038459150814880769678164710682640645020809844247281420414345409676513250087646498966103027640741035373997177784398157263465930674390980645936188857788920618294564790716926530638255076446360127979874499708518630899288060553028626293996880796081657818801407812634310500656325563347150264331398319354216970035938929962960245719679945883954423366289469686468660125408999698668300320069887268293996880679523253989467738465265673466520011", "rational_output": rational, "decimal_precision": prec, "use_float": use_float }, "accelerant": { "N": N, "a_sequence": aseq, "rho_hat": rho_hat, }, "kunferman": kun_rows, "milestones": [{"k": k, "N_k": aseq[k-1]} for k in range(1, N+1)] } manifest = to_repr(manifest, rational=rational) with open(out_path, "w", encoding="utf-8") as f: json.dump(manifest, f, indent=2) return out_path def main(): """Parse args and run tool.""" parser = argparse.ArgumentParser(description="High-Performance Kunferman Toolkit") parser.add_argument("--N", type=int, default=35, help="Length of accelerant sequence a(n)") parser.add_argument("--kunfs", type=int, nargs="+", default=[100000, 50000, 10000], help="n values for S_n (e.g., 10000000 500000000 1000000000)") parser.add_argument("--float", type=int, default=0, help="1 for fast float mode, 0 for Decimal") parser.add_argument("--rational", type=int, default=1, help="1 to include rationalized outputs") parser.add_argument("--prec", type=int, default=80, help="Decimal precision when not using float") parser.add_argument("--out", type=str, default="weaponizer_manifest.json", help="Output JSON path") args = parser.parse_args() out = run_tool(N=args.N, kun_ns=args.kunfs, use_float=bool(args.float), rational=bool(args.rational), prec=args.prec, out_path=args.out) print(f"Wrote manifest to {out}") if __name__ == "__main__": main() #v1 script = r''' #!/usr/bin/env python3 """ Benevolent Weaponizer — Kunferman + Accelerant Toolkit ====================================================== - Generates the accelerant sequence a(n) with explosive milestones. - Estimates the plastic constant ρ from data (log-log linear model). - Computes Kunferman partial sums S_n → π/2 with optional Shanks acceleration. - Optional rationalized output (fractions) for audit trails. - Exports a compact JSON manifest with all data. Usage (CLI): python benevolent_weaponizer.py --N 12 --kunfs 200 400 800 1600 --rational 1 --prec 80 --out out.json """ from __future__ import annotations import argparse, json, math, statistics from fractions import Fraction from decimal import Decimal, getcontext from typing import Iterable, List, Tuple, Optional # --------------------------- # 1) A209286 # --------------------------- def accel_sequence(n_max: int) -> List[int]: # a(n)=a(n-1)+(1+a(n-2))*a(n-3), a(1)=1, a(n)=0 for n<1 a: List[int] = [0, 1] # 1-indexed; a[0] placeholder for n in range(2, n_max+1): a_nm1 = a[n-1] a_nm2 = a[n-2] if n-2 >= 1 else 0 a_nm3 = a[n-3] if n-3 >= 1 else 0 a.append(a_nm1 + (1 + a_nm2) * a_nm3) return a[1:] def estimate_plastic_constant(a: List[int]) -> float: # Model: ln ln a_n ≈ ln C + n ln ρ (valid once a_n > e) xs, ys = [], [] for n, val in enumerate(a, start=1): if val > math.e: # avoid log log <= 0 xs.append(n) ys.append(math.log(math.log(val))) if len(xs) < 2: return float('nan') xbar = sum(xs) / len(xs) ybar = sum(ys) / len(ys) num = sum((x-xbar)*(y-ybar) for x, y in zip(xs, ys)) den = sum((x-xbar)**2 for x in xs) slope = num/den rho = math.exp(slope) return rho # --------------------------------------- # 2) Kunferman constant (→ π/2) with ACC # --------------------------------------- def S_n_float(n: int) -> float: # S_n = (2/n^2) * sum_{j=1}^{n-1} j^2 / sqrt(n^2 - j^2) tot = 0.0 n2 = n*n for j in range(1, n): tot += (j*j) / math.sqrt(n2 - j*j) return (2.0 / n2) * tot def S_n_decimal(n: int, prec: int = 80) -> Decimal: # High-precision decimal evaluation (deterministic, audit-friendly) getcontext().prec = prec nD = Decimal(n) n2 = nD*nD tot = Decimal(0) for j in range(1, n): jD = Decimal(j) tot += (jD*jD) / (n2 - jD*jD).sqrt() return (Decimal(2) / n2) * tot def shanks_last3(seq: Iterable) -> Optional[float]: seq = list(seq) if len(seq) < 3: return None a, b, c = seq[-3], seq[-2], seq[-1] a = float(a); b = float(b); c = float(c) denom = c - 2*b + a if abs(denom) < 1e-30: return None return c - (c - b)**2 / denom # -----------------------

""" Macro–Micro Field Overlay & Correlation ======================================= This script overlays three geospatial point datasets on a 1° grid and computes correlations: 1) EMAG magnetic intensity (nT) -> emag.csv 2) Schumann resonance coherence (unitless) -> schumann.csv 3) HRV proxy (e.g., RMSSD or LF/HF) -> hrv.csv EXPECTED CSV SCHEMA (all files): -------------------------------- lat, lon, value 12.5, -98.0, 42300.0 - lat in degrees (-90..90), lon in degrees (-180..180 or 0..360; both accepted) - value is the metric noted above. USAGE: ------ Place the three files in the SAME folder as this script (or adjust paths below), then run: python macro_micro_overlay.py OUTPUTS: -------- - overlay_grid.csv # 1°x1° global grid with aligned values and masks applied - correlations.json # global and regional Pearson/Spearman correlations - maps/*.png # quick-look heatmaps of each layer and paired deltas """ import os import json from pathlib import Path import numpy as np import pandas as pd import matplotlib.pyplot as plt HERE = Path(__file__).parent DATAFILES = { "emag": HERE / "emag.csv", "schumann": HERE / "schumann.csv", "hrv": HERE / "hrv.csv", } OUTDIR = HERE / "maps" OUTDIR.mkdir(exist_ok=True) def _read_csv(path: Path, name: str) -> pd.DataFrame: if not path.exists(): raise FileNotFoundError(f"Missing {name} data at: {path}. " "Expected columns: lat, lon, value") df = pd.read_csv(path) cols = {c.lower(): c for c in df.columns} for req in ("lat", "lon", "value"): if req not in cols: raise ValueError(f"{name} missing required column '{req}'. Found: {list(df.columns)}") # normalize col names df = df.rename(columns={cols["lat"]:"lat", cols["lon"]:"lon", cols["value"]:"value"}) # normalize longitude to [-180,180] df["lon"] = ((df["lon"] + 180) % 360) - 180 # clip latitude bounds df["lat"] = df["lat"].clip(-90, 90) return df def grid_1deg(df: pd.DataFrame, agg="median") -> pd.DataFrame: """Aggregate to 1°x1° grid using median (robust).""" # bin edges lat_bins = np.arange(-90, 91, 1) lon_bins = np.arange(-180, 181, 1) # assign bins df = df.copy() df["lat_bin"] = pd.cut(df["lat"], bins=lat_bins, labels=lat_bins[:-1], include_lowest=True) df["lon_bin"] = pd.cut(df["lon"], bins=lon_bins, labels=lon_bins[:-1], include_lowest=True) df = df.dropna(subset=["lat_bin","lon_bin"]) df["lat_bin"] = df["lat_bin"].astype(int) df["lon_bin"] = df["lon_bin"].astype(int) if agg == "median": g = df.groupby(["lat_bin","lon_bin"])["value"].median() else: g = df.groupby(["lat_bin","lon_bin"])["value"].mean() grid = g.reset_index().rename(columns={"lat_bin":"lat","lon_bin":"lon","value":agg}) return grid def zscore(series: pd.Series) -> pd.Series: m = series.mean() s = series.std(ddof=0) return (series - m) / s if s != 0 else series * 0.0 def plot_grid(df: pd.DataFrame, col: str, fname: Path, title: str): # pivot to 2D piv = df.pivot(index="lat", columns="lon", values=col).sort_index(ascending=False) plt.figure(figsize=(10,4.5)) plt.imshow(piv.values, aspect="auto", extent=[-180,180,-90,90]) plt.colorbar(label=col) plt.xlabel("Longitude") plt.ylabel("Latitude") plt.title(title) plt.tight_layout() plt.savefig(fname, dpi=180) plt.close() def corr(a: pd.Series, b: pd.Series): # mask NaNs mask = a.notna() & b.notna() if mask.sum() < 3: return {"pearson": None, "spearman": None, "n": int(mask.sum())} pear = a[mask].corr(b[mask], method="pearson") spear = a[mask].corr(b[mask], method="spearman") return {"pearson": float(pear), "spearman": float(spear), "n": int(mask.sum())} def regional_slices(df: pd.DataFrame): # simple macro regions regions = { "Global": (-90,90,-180,180), "Americas": (-60,75,-170,-30), "Europe_Africa": (-40,72,-30,60), "Asia_Oceania": (-50,70,60,180), "Equatorial_Belt": (-15,15,-180,180), "High_Latitude_North": (55,88,-180,180), "High_Latitude_South": (-88,-55,-180,180), } res = {} for name, (latmin, latmax, lonmin, lonmax) in regions.items(): sub = df[(df["lat"]>=latmin)&(df["lat"]<=latmax)&(df["lon"]>=lonmin)&(df["lon"]<=lonmax)] res[name] = sub return res def main(): # Load data = {} for key, path in DATAFILES.items(): data[key] = _read_csv(path, key) # Grid to 1° grids = {k: grid_1deg(v) for k, v in data.items()} for k, g in grids.items(): g = g.rename(columns={"median": k}) grids[k] = g # Join on lat,lon grid = grids["emag"] for k in ["schumann","hrv"]: grid = grid.merge(grids[k], on=["lat","lon"], how="outer") # z-scores (normalized anomalies for comparability) for k in ["emag","schumann","hrv"]: grid[f"{k}_z"] = zscore(grid[k]) # Save base overlay out_csv = HERE / "overlay_grid.csv" grid.to_csv(out_csv, index=False) # Quick-look maps if grid["emag"].notna().sum() > 0: plot_grid(grid, "emag", OUTDIR/"01_emag.png", "EMAG Intensity (nT) — 1° grid") if grid["schumann"].notna().sum() > 0: plot_grid(grid, "schumann", OUTDIR/"02_schumann.png", "Schumann Coherence — 1° grid") if grid["hrv"].notna().sum() > 0: plot_grid(grid, "hrv", OUTDIR/"03_hrv.png", "HRV Proxy — 1° grid") # Differences (z-space) if grid[["emag_z","schumann_z"]].notna().all(axis=1).sum() > 0: grid["schumann_minus_emag"] = grid["schumann_z"] - grid["emag_z"] plot_grid(grid, "schumann_minus_emag", OUTDIR/"10_schumann_minus_emag.png", "Schumann – EMAG (z-score space)") if grid[["hrv_z","emag_z"]].notna().all(axis=1).sum() > 0: grid["hrv_minus_emag"] = grid["hrv_z"] - grid["emag_z"] plot_grid(grid, "hrv_minus_emag", OUTDIR/"11_hrv_minus_emag.png", "HRV – EMAG (z-score space)") # Correlations (global & regional) results = {} regions = regional_slices(grid) for rname, sub in regions.items(): results[rname] = { "emag_vs_schumann": corr(sub["emag_z"], sub["schumann_z"]), "emag_vs_hrv": corr(sub["emag_z"], sub["hrv_z"]), "schumann_vs_hrv": corr(sub["schumann_z"], sub["hrv_z"]), "count_cells": int(len(sub)), } with open(HERE/"correlations.json", "w") as f: json.dump(results, f, indent=2) print("Done.\nOutputs:") print(f"- {out_csv}") print(f"- {HERE/'correlations.json'}") print(f"- {OUTDIR}/*.png") if __name__ == "__main__": try: main() except FileNotFoundError as e: print(str(e)) print("\nINSTRUCTIONS") print("=========== ") print("Provide three CSV files in this folder with columns: lat, lon, value") print("Required filenames: emag.csv, schumann.csv, hrv.csv") print("Examples:") print("lat,lon,value") print("12.5,-98.0,42300.0") print("\nThen re-run: python macro_micro_overlay.py") #latest #!/usr/bin/env python3 """ Benevolent Weaponizer — High-Performance Kunferman + Accelerant Toolkit ====================================================================== - Generates accelerant sequence a(n) with explosive milestones. - Estimates plastic constant ρ using log-log linear model. - Computes Kunferman partial sums S_n → π/2 with optional Shanks acceleration. - Supports high n values (e.g., 100,000) with optimized performance. - Optional rational output (fractions) or fast float mode. - Exports JSON manifest. Usage (CLI): python benevolent_weaponizer.py --N 12 --kunfs 10000 50000 100000 --float 1 --prec 80 --out out.json """ from __future__ import annotations import argparse, json, math, statistics from fractions import Fraction from decimal import Decimal, getcontext from typing import Iterable, List, Tuple, Optional import numpy as np # For numerical optimization from concurrent.futures import ThreadPoolExecutor # For parallelization # --------------------------- # 1) A209286 (Accelerant Sequence) # --------------------------- from typing import List def accel_sequence(n_max: int) -> List[int]: """Generate a(n) = a(n-1) + (1 + a(n-2)) * a(n-3), with a(1)=1 and a(n)=0 for n<1.""" a = [0] * (n_max + 1) # Pre-allocate for speed a[1] = 1 for n in range(2, n_max + 1): if n > 2: a[n] = a[n-1] + (1 + a[n-2]) * a[n-3] else: a[n] = a[n-1] return a[1:] def accel_sequence_curved(n_max: int) -> List[float]: """Accelerant sequence with wave curvature modulation.""" a = [0.0] * (n_max + 1) a[1] = 1.0 for n in range(2, n_max + 1): if n > 2: raw = a[n-1] + (1 + a[n-2]) * a[n-3] curvature = wave_curvature(n) a[n] = raw * curvature else: a[n] = a[n-1] return a[1:] def wave_curvature(n: int) -> float: """Wave curvature weight function.""" sin2 = np.sin(n)**2 cos2 = np.cos(n)**2 num = sin2 - cos2 denom = (sin2 + cos2)**(3/2) if denom == 0: return 0.0 return -(num / denom) def estimate_plastic_constant(S_n: List[int]) -> float: """Estimate ρ using log-log linear regression.""" xs, ys = [], [] for n, val in enumerate(S_n, start=1): if val > math.e: xs.append(n) ys.append(math.log(math.log(val))) if len(xs) < 2: return float('nan') slope = np.polyfit(xs, ys, 1)[0] # Linear fit return math.exp(slope) # --------------------------------------- # 2) Kunferman Constant (→ π/2) with ACC # --------------------------------------- def S_n_float(n: int) -> float: """Fast float computation of S_n = (2/n^2) * sum_{j=1}^{n-1} j^2 / sqrt(n^2 - j^2).""" n2 = n * n total = 0.0 for j in range(1, n): total += (j * j) / math.sqrt(n2 - j * j) return (2.0 / n2) * total def S_n_decimal(n: int, prec: int = 10) -> Decimal: """High-precision Decimal computation (slower, for audit).""" getcontext().prec = prec nD = Decimal(n) n2 = nD * nD total = Decimal(0) for j in range(1, n): jD = Decimal(j) total += (jD * jD) / (n2 - jD * jD).sqrt() return (Decimal(2) / n2) * total def shanks_last3(seq: Iterable) -> Optional[float]: """Apply Shanks transformation to last 3 values.""" seq = list(seq) if len(seq) < 3: return None a, b, c = float(seq[-3]), float(seq[-2]), float(seq[-1]) denom = c - 2 * b + a if abs(denom) < 1e-30: return None return c - (c - b) ** 2 / denom # -------------------------- # JSON + Formatting # -------------------------- def to_repr(x, rational: bool, max_den: int = 10**12): """Convert to rational or float representation.""" if isinstance(x, int): return x if isinstance(x, Fraction): return {"num": x.numerator, "den": x.denominator} if isinstance(x, Decimal): if rational: frac = Fraction(str(x)).limit_denominator(max_den) return {"str": format(x, 'f'), "frac": {"num": frac.numerator, "den": frac.denominator}} return format(x, 'f') if isinstance(x, float): if rational: frac = Fraction(x).limit_denominator(max_den) return {"float": x, "frac": {"num": frac.numerator, "den": frac.denominator}} return x if isinstance(x, (list, tuple)): return [to_repr(v, rational, max_den) for v in x] if isinstance(x, dict): return {k: to_repr(v, rational, max_den) for k, v in x.items()} return x def run_tool(N: int, kun_ns: List[int], use_float: bool, rational: bool, prec: int, out_path: str): """Execute the toolkit and generate manifest.""" # Accelerant aseq = accel_sequence_curved(N) rho_hat = estimate_plastic_constant(aseq) # Kunferman with parallel computation for large n kun_rows = [] partials = [] compute_fn = S_n_float if use_float else lambda n: S_n_decimal(n, prec) with ThreadPoolExecutor() as executor: results = list(executor.map(compute_fn, kun_ns)) for n, val in zip(kun_ns, results): partials.append(val) s = shanks_last3(partials) if len(partials) >= 3 else None kun_rows.append({"n": n, "Sn": val, "Shanks": s}) manifest = { "meta": { "target_zeta_half": "-1.460354508809", "rational_output": rational, "decimal_precision": prec, "use_float": use_float }, "accelerant": { "N": N, "a_sequence": aseq, "rho_hat": rho_hat, }, "kunferman": kun_rows, "milestones": [{"k": k, "N_k": aseq[k-1]} for k in range(1, N+1)] } manifest = to_repr(manifest, rational=rational) with open(out_path, "w", encoding="utf-8") as f: json.dump(manifest, f, indent=2) return out_path def main(): """Parse args and run tool.""" parser = argparse.ArgumentParser(description="High-Performance Kunferman Toolkit") parser.add_argument("--N", type=int, default=35, help="Length of accelerant sequence a(n)") parser.add_argument("--kunfs", type=int, nargs="+", default=[100000, 50000, 10000], help="n values for S_n (e.g., 10000000 500000000 1000000000)") parser.add_argument("--float", type=int, default=0, help="1 for fast float mode, 0 for Decimal") parser.add_argument("--rational", type=int, default=1, help="1 to include rationalized outputs") parser.add_argument("--prec", type=int, default=80, help="Decimal precision when not using float") parser.add_argument("--out", type=str, default="weaponizer_manifest.json", help="Output JSON path") args = parser.parse_args() out = run_tool(N=args.N, kun_ns=args.kunfs, use_float=bool(args.float), rational=bool(args.rational), prec=args.prec, out_path=args.out) print(f"Wrote manifest to {out}") if __name__ == "__main__": main()