Scientific Machine Learning
Physics-informed AI, climate modeling, and computational science applications
🔬 Scientific Research Notice
Scientific ML applications require deep domain expertise in both machine learning and the target scientific field. This content provides educational examples and should not replace rigorous scientific validation, peer review, and domain expert consultation. Results must be validated against established physical principles and experimental data.
Scientific ML Domains
Physics-Informed Neural Networks (PINNs)
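A PINN approximates the solution of a differential equation with a neural network and trains it by minimizing a weighted sum of data, physics-residual, boundary, and initial-condition terms. The framework below implements this composite loss (see compute_total_loss):

total_loss = w_data·L_data + w_physics·L_physics + w_boundary·L_boundary + w_initial·L_initial

where each L term is the mean squared residual of the corresponding constraint and the weights w can be adapted during training.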
Physics-Informed Neural Network Framework
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from typing import Dict, Callable
class PhysicsInformedNeuralNetwork(nn.Module):
"""
Physics-Informed Neural Network (PINN) framework
Features:
- Automatic differentiation for physics constraints
- Multi-physics domain support
- Adaptive loss weighting
- Uncertainty quantification
- Transfer learning for related problems
"""
def __init__(self, config: Dict):
super().__init__()
self.config = config
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Network architecture
self.input_dim = config['input_dim']
self.output_dim = config['output_dim']
self.hidden_dims = config['hidden_dims']
# Build network layers
self.network = self._build_network()
# Physics equations and constraints
self.physics_equations = []
self.boundary_conditions = []
self.initial_conditions = []
# Loss components and weights
self.loss_weights = {
'data': config.get('data_weight', 1.0),
'physics': config.get('physics_weight', 1.0),
'boundary': config.get('boundary_weight', 1.0),
'initial': config.get('initial_weight', 1.0)
}
# Adaptive loss weighting
self.adaptive_weights = config.get('adaptive_weights', True)
self.weight_update_freq = config.get('weight_update_freq', 100)
# Uncertainty quantification
self.uncertainty_estimation = config.get('uncertainty_estimation', False)
self.ensemble_size = config.get('ensemble_size', 5)
def _build_network(self) -> nn.Module:
"""Build the neural network architecture"""
layers = []
prev_dim = self.input_dim
# Hidden layers
for hidden_dim in self.hidden_dims:
layers.extend([
nn.Linear(prev_dim, hidden_dim),
nn.Tanh(), # Smooth activation for better gradients
nn.Dropout(self.config.get('dropout', 0.1))
])
prev_dim = hidden_dim
# Output layer
layers.append(nn.Linear(prev_dim, self.output_dim))
return nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward pass through the network"""
return self.network(x)
def add_physics_equation(self, equation_func: Callable, weight: float = 1.0):
"""Add a physics equation constraint"""
self.physics_equations.append({
'function': equation_func,
'weight': weight
})
def add_boundary_condition(self, bc_func: Callable, weight: float = 1.0):
"""Add a boundary condition constraint"""
self.boundary_conditions.append({
'function': bc_func,
'weight': weight
})
def add_initial_condition(self, ic_func: Callable, weight: float = 1.0):
"""Add an initial condition constraint"""
self.initial_conditions.append({
'function': ic_func,
'weight': weight
})
def compute_physics_loss(self, x_physics: torch.Tensor) -> torch.Tensor:
"""Compute loss from physics equation violations"""
physics_loss = torch.tensor(0.0, device=self.device)
for equation in self.physics_equations:
# Compute equation residual
residual = equation['function'](x_physics, self)
# Mean squared residual
equation_loss = torch.mean(residual ** 2)
physics_loss += equation['weight'] * equation_loss
return physics_loss
def compute_boundary_loss(self, x_boundary: torch.Tensor,
u_boundary: torch.Tensor) -> torch.Tensor:
"""Compute loss from boundary condition violations"""
boundary_loss = torch.tensor(0.0, device=self.device)
for bc in self.boundary_conditions:
# Compute boundary condition residual
residual = bc['function'](x_boundary, u_boundary, self)
# Mean squared residual
bc_loss = torch.mean(residual ** 2)
boundary_loss += bc['weight'] * bc_loss
return boundary_loss
def compute_initial_loss(self, x_initial: torch.Tensor,
u_initial: torch.Tensor) -> torch.Tensor:
"""Compute loss from initial condition violations"""
initial_loss = torch.tensor(0.0, device=self.device)
for ic in self.initial_conditions:
# Compute initial condition residual
residual = ic['function'](x_initial, u_initial, self)
# Mean squared residual
ic_loss = torch.mean(residual ** 2)
initial_loss += ic['weight'] * ic_loss
return initial_loss
def compute_data_loss(self, x_data: torch.Tensor,
u_data: torch.Tensor) -> torch.Tensor:
"""Compute loss from training data"""
u_pred = self.forward(x_data)
return torch.mean((u_pred - u_data) ** 2)
def compute_total_loss(self, x_data: torch.Tensor, u_data: torch.Tensor,
x_physics: torch.Tensor, x_boundary: torch.Tensor,
u_boundary: torch.Tensor, x_initial: torch.Tensor,
u_initial: torch.Tensor) -> Dict[str, torch.Tensor]:
"""Compute total weighted loss"""
# Individual loss components
data_loss = self.compute_data_loss(x_data, u_data)
physics_loss = self.compute_physics_loss(x_physics)
boundary_loss = self.compute_boundary_loss(x_boundary, u_boundary)
initial_loss = self.compute_initial_loss(x_initial, u_initial)
# Weighted total loss
total_loss = (
self.loss_weights['data'] * data_loss +
self.loss_weights['physics'] * physics_loss +
self.loss_weights['boundary'] * boundary_loss +
self.loss_weights['initial'] * initial_loss
)
return {
'total': total_loss,
'data': data_loss,
'physics': physics_loss,
'boundary': boundary_loss,
'initial': initial_loss
}
def update_loss_weights(self, losses: Dict[str, torch.Tensor]):
"""Adaptively update loss weights based on relative magnitudes"""
if not self.adaptive_weights:
return
# Calculate relative loss magnitudes
loss_values = {k: v.item() for k, v in losses.items() if k != 'total'}
max_loss = max(loss_values.values())
# Update weights inversely proportional to loss magnitude
for loss_type in self.loss_weights:
if loss_type in loss_values and loss_values[loss_type] > 0:
self.loss_weights[loss_type] = max_loss / loss_values[loss_type]
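# Example: how a user-defined PDE residual plugs into add_physics_equation().
# Illustrative sketch, not part of the original framework: it assumes a 1D viscous
# Burgers' problem with inputs ordered as (x, t), a scalar output u, and a
# hypothetical viscosity constant `nu`.
def burgers_residual(x: torch.Tensor, model: nn.Module, nu: float = 0.01) -> torch.Tensor:
    """Residual of the viscous Burgers' equation: ∂u/∂t + u∂u/∂x - ν∂²u/∂x² = 0"""
    x.requires_grad_(True)
    u = model(x)
    grads = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u), create_graph=True)[0]
    u_x, u_t = grads[:, 0:1], grads[:, 1:2]
    u_xx = torch.autograd.grad(u_x, x, grad_outputs=torch.ones_like(u_x), create_graph=True)[0][:, 0:1]
    return u_t + u * u_x - nu * u_xx

def make_burgers_pinn() -> PhysicsInformedNeuralNetwork:
    """Assemble a minimal Burgers' PINN with the base framework (illustrative)."""
    pinn = PhysicsInformedNeuralNetwork({'input_dim': 2, 'output_dim': 1, 'hidden_dims': [64, 64, 64]})
    pinn.add_physics_equation(burgers_residual)
    return pinn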
class FluidDynamicsPINN(PhysicsInformedNeuralNetwork):
"""PINN specialized for fluid dynamics (Navier-Stokes equations)"""
def __init__(self, config: Dict):
super().__init__(config)
# Fluid properties
self.reynolds_number = config.get('reynolds_number', 100.0)
self.density = config.get('density', 1.0)
self.viscosity = config.get('viscosity', 0.01)
# Add Navier-Stokes equations
self.add_physics_equation(self._navier_stokes_continuity)
self.add_physics_equation(self._navier_stokes_momentum_x)
self.add_physics_equation(self._navier_stokes_momentum_y)
def _navier_stokes_continuity(self, x: torch.Tensor, model: nn.Module) -> torch.Tensor:
"""Continuity equation: ∂u/∂x + ∂v/∂y = 0"""
x.requires_grad_(True)
# Forward pass
output = model(x)
u = output[:, 0:1] # x-velocity
v = output[:, 1:2] # y-velocity
# Compute gradients
u_x = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u),
create_graph=True)[0][:, 0:1]
v_y = torch.autograd.grad(v, x, grad_outputs=torch.ones_like(v),
create_graph=True)[0][:, 1:2]
# Continuity equation residual
residual = u_x + v_y
return residual
def _navier_stokes_momentum_x(self, x: torch.Tensor, model: nn.Module) -> torch.Tensor:
"""X-momentum equation"""
x.requires_grad_(True)
# Forward pass
output = model(x)
u = output[:, 0:1] # x-velocity
v = output[:, 1:2] # y-velocity
p = output[:, 2:3] # pressure
# First-order gradients
u_x = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u),
create_graph=True)[0][:, 0:1]
u_y = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u),
create_graph=True)[0][:, 1:2]
u_t = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u),
create_graph=True)[0][:, 2:3] if x.shape[1] > 2 else torch.zeros_like(u)
# Second-order gradients for viscous terms
u_xx = torch.autograd.grad(u_x, x, grad_outputs=torch.ones_like(u_x),
create_graph=True)[0][:, 0:1]
u_yy = torch.autograd.grad(u_y, x, grad_outputs=torch.ones_like(u_y),
create_graph=True)[0][:, 1:2]
# Pressure gradient
p_x = torch.autograd.grad(p, x, grad_outputs=torch.ones_like(p),
create_graph=True)[0][:, 0:1]
# Navier-Stokes x-momentum equation
# ∂u/∂t + u∂u/∂x + v∂u/∂y = -∂p/∂x + (1/Re)(∂²u/∂x² + ∂²u/∂y²)
residual = (u_t + u * u_x + v * u_y + p_x -
(1 / self.reynolds_number) * (u_xx + u_yy))
return residual
def _navier_stokes_momentum_y(self, x: torch.Tensor, model: nn.Module) -> torch.Tensor:
"""Y-momentum equation"""
x.requires_grad_(True)
# Forward pass
output = model(x)
u = output[:, 0:1] # x-velocity
v = output[:, 1:2] # y-velocity
p = output[:, 2:3] # pressure
# First-order gradients
v_x = torch.autograd.grad(v, x, grad_outputs=torch.ones_like(v),
create_graph=True)[0][:, 0:1]
v_y = torch.autograd.grad(v, x, grad_outputs=torch.ones_like(v),
create_graph=True)[0][:, 1:2]
v_t = torch.autograd.grad(v, x, grad_outputs=torch.ones_like(v),
create_graph=True)[0][:, 2:3] if x.shape[1] > 2 else torch.zeros_like(v)
# Second-order gradients for viscous terms
v_xx = torch.autograd.grad(v_x, x, grad_outputs=torch.ones_like(v_x),
create_graph=True)[0][:, 0:1]
v_yy = torch.autograd.grad(v_y, x, grad_outputs=torch.ones_like(v_y),
create_graph=True)[0][:, 1:2]
# Pressure gradient
p_y = torch.autograd.grad(p, x, grad_outputs=torch.ones_like(p),
create_graph=True)[0][:, 1:2]
# Navier-Stokes y-momentum equation
# ∂v/∂t + u∂v/∂x + v∂v/∂y = -∂p/∂y + (1/Re)(∂²v/∂x² + ∂²v/∂y²)
residual = (v_t + u * v_x + v * v_y + p_y -
(1 / self.reynolds_number) * (v_xx + v_yy))
return residual
class HeatTransferPINN(PhysicsInformedNeuralNetwork):
"""PINN specialized for heat transfer problems"""
def __init__(self, config: Dict):
super().__init__(config)
# Thermal properties
self.thermal_diffusivity = config.get('thermal_diffusivity', 1.0)
self.heat_source = config.get('heat_source', 0.0)
# Add heat equation
self.add_physics_equation(self._heat_equation)
def _heat_equation(self, x: torch.Tensor, model: nn.Module) -> torch.Tensor:
"""Heat equation: ∂T/∂t = α∇²T + Q"""
x.requires_grad_(True)
# Forward pass
T = model(x) # Temperature
# First-order gradients
T_t = torch.autograd.grad(T, x, grad_outputs=torch.ones_like(T),
create_graph=True)[0][:, -1:] # Time derivative
T_x = torch.autograd.grad(T, x, grad_outputs=torch.ones_like(T),
create_graph=True)[0][:, 0:1] # x derivative
T_y = torch.autograd.grad(T, x, grad_outputs=torch.ones_like(T),
create_graph=True)[0][:, 1:2] # y derivative
# Second-order gradients (Laplacian)
T_xx = torch.autograd.grad(T_x, x, grad_outputs=torch.ones_like(T_x),
create_graph=True)[0][:, 0:1]
T_yy = torch.autograd.grad(T_y, x, grad_outputs=torch.ones_like(T_y),
create_graph=True)[0][:, 1:2]
# Heat equation residual
laplacian = T_xx + T_yy
residual = T_t - self.thermal_diffusivity * laplacian - self.heat_source
return residual
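# Example: instantiating the heat-transfer PINN above with a simple Dirichlet boundary.
# Illustrative sketch only; the configuration values are placeholders, not recommendations.
def create_heat_transfer_problem() -> HeatTransferPINN:
    config = {
        'input_dim': 3,              # x, y, t
        'output_dim': 1,             # temperature T
        'hidden_dims': [64, 64, 64],
        'thermal_diffusivity': 0.1,
        'heat_source': 0.0,
        'physics_weight': 1.0,
        'boundary_weight': 10.0,
        'initial_weight': 10.0
    }
    model = HeatTransferPINN(config)
    def dirichlet_boundary(x_boundary, u_boundary, model):
        """Fixed-temperature boundary: residual is the deviation from T = 0"""
        return model(x_boundary)
    model.add_boundary_condition(dirichlet_boundary)
    return model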
class PINNTrainer:
"""Training framework for Physics-Informed Neural Networks"""
def __init__(self, model: PhysicsInformedNeuralNetwork, config: Dict):
        self.model = model.to(model.device)  # Keep model parameters on the same device as the training data
self.config = config
self.device = model.device
# Optimizer
self.optimizer = optim.Adam(
model.parameters(),
lr=config.get('learning_rate', 0.001)
)
# Learning rate scheduler
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, patience=config.get('lr_patience', 100)
)
# Training history
self.loss_history = []
def train(self, data_dict: Dict[str, torch.Tensor], num_epochs: int) -> Dict:
"""Train the PINN model"""
# Move data to device
for key in data_dict:
data_dict[key] = data_dict[key].to(self.device)
self.model.train()
for epoch in range(num_epochs):
self.optimizer.zero_grad()
# Compute total loss
losses = self.model.compute_total_loss(**data_dict)
total_loss = losses['total']
# Backward pass
total_loss.backward()
# Gradient clipping
torch.nn.utils.clip_grad_norm_(
self.model.parameters(),
self.config.get('max_grad_norm', 1.0)
)
self.optimizer.step()
# Update learning rate
self.scheduler.step(total_loss)
# Update loss weights
if epoch % self.model.weight_update_freq == 0:
self.model.update_loss_weights(losses)
# Record loss history
loss_dict = {k: v.item() for k, v in losses.items()}
loss_dict['epoch'] = epoch
self.loss_history.append(loss_dict)
# Print progress
if epoch % self.config.get('print_freq', 100) == 0:
print(f"Epoch {epoch}: Total Loss = {total_loss.item():.6f}")
for loss_type, loss_value in losses.items():
if loss_type != 'total':
print(f" {loss_type.capitalize()} Loss = {loss_value.item():.6f}")
return {
'final_loss': total_loss.item(),
'loss_history': self.loss_history,
'model_state': self.model.state_dict()
}
    def evaluate(self, test_data: Dict[str, torch.Tensor]) -> Dict:
        """Evaluate the trained PINN model"""
        self.model.eval()
        # Move test data to device
        for key in test_data:
            test_data[key] = test_data[key].to(self.device)
        # Physics, boundary, and initial residuals rely on torch.autograd.grad,
        # so the loss breakdown must be computed with gradient tracking enabled
        test_losses = self.model.compute_total_loss(**test_data)
        # Plain prediction metrics do not need gradients
        with torch.no_grad():
            x_test = test_data['x_data']
            u_pred = self.model(x_test)
            u_true = test_data['u_data']
            # Calculate metrics
            mse = torch.mean((u_pred - u_true) ** 2)
            mae = torch.mean(torch.abs(u_pred - u_true))
            # Relative error (epsilon avoids division by zero)
            relative_error = torch.mean(torch.abs(u_pred - u_true) / (torch.abs(u_true) + 1e-8))
        return {
            'test_losses': {k: v.item() for k, v in test_losses.items()},
            'mse': mse.item(),
            'mae': mae.item(),
            'relative_error': relative_error.item(),
            'predictions': u_pred.cpu().numpy(),
            'ground_truth': u_true.cpu().numpy()
        }
# Example usage for fluid dynamics
def create_fluid_dynamics_problem():
"""Create a fluid dynamics PINN problem"""
config = {
'input_dim': 3, # x, y, t
'output_dim': 3, # u, v, p
'hidden_dims': [128, 128, 128, 128],
'reynolds_number': 100.0,
'learning_rate': 0.001,
'data_weight': 1.0,
'physics_weight': 1.0,
'boundary_weight': 10.0,
'initial_weight': 10.0
}
# Create model
model = FluidDynamicsPINN(config)
# Define boundary conditions
def no_slip_boundary(x_boundary, u_boundary, model):
"""No-slip boundary condition: u = v = 0 on walls"""
pred = model(x_boundary)
u_pred = pred[:, 0:1]
v_pred = pred[:, 1:2]
return torch.cat([u_pred, v_pred], dim=1)
def inlet_boundary(x_boundary, u_boundary, model):
"""Inlet boundary condition: prescribed velocity profile"""
pred = model(x_boundary)
u_pred = pred[:, 0:1]
y = x_boundary[:, 1:2]
u_inlet = 4 * y * (1 - y) # Parabolic profile
return u_pred - u_inlet
    # NOTE: in practice each boundary condition should be evaluated on its own set of
    # boundary points (walls vs. inlet); both share x_boundary here only for brevity.
    model.add_boundary_condition(no_slip_boundary)
    model.add_boundary_condition(inlet_boundary)
return model
# Example training data generation
def generate_training_data(num_points: int = 10000):
"""Generate training data for PINN"""
# Domain: x ∈ [0, 2], y ∈ [0, 1], t ∈ [0, 1]
x = torch.rand(num_points, 1) * 2
y = torch.rand(num_points, 1)
t = torch.rand(num_points, 1)
x_data = torch.cat([x, y, t], dim=1)
    # Generate a divergence-free synthetic velocity field (for demonstration only)
    u_field = torch.sin(np.pi * x) * torch.cos(np.pi * y) * torch.exp(-t)
    v_field = -torch.cos(np.pi * x) * torch.sin(np.pi * y) * torch.exp(-t)
    p_field = torch.zeros_like(u_field)
    u_data = torch.cat([u_field, v_field, p_field], dim=1)
    return {
        'x_data': x_data,
        'u_data': u_data,
        'x_physics': x_data,          # Collocation points for the physics residuals
        # NOTE: in a real problem, boundary points should be sampled on the domain
        # boundary and initial points at t = 0; interior subsets are used here for brevity.
        'x_boundary': x_data[:1000],
        'u_boundary': u_data[:1000],
        'x_initial': x_data[:1000],
        'u_initial': u_data[:1000]
    }
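# End-to-end usage sketch (illustrative): wiring the pieces above together.
# Epoch counts and trainer settings are placeholders, not tuned values.
if __name__ == '__main__':
    model = create_fluid_dynamics_problem()
    train_data = generate_training_data(num_points=10000)
    trainer = PINNTrainer(model, {'learning_rate': 1e-3, 'max_grad_norm': 1.0, 'print_freq': 500})
    results = trainer.train(train_data, num_epochs=2000)
    metrics = trainer.evaluate(generate_training_data(num_points=2000))
    print(f"Test MSE: {metrics['mse']:.6e}, relative error: {metrics['relative_error']:.4f}")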
Related Technologies
PyTorch: Deep learning framework for scientific ML
NumPy: Numerical computing for scientific applications
SciPy: Scientific computing library
Jupyter: Interactive development for scientific computing
TensorFlow: ML framework for large-scale scientific problems
JAX: High-performance scientific computing with autodiff
CUDA: GPU acceleration for scientific computing
Docker: Containerization for reproducible scientific workflows