Basic Theory

At its core this is still a neural network, but the governing physical equation is added as a constraint so that the trained result obeys the physics. The residual of the physical equation is folded into the network's loss function, letting the equation "participate" in training: at each iteration the network optimizes not only its own data loss but also the equation residual, so the converged solution ends up satisfying the physical law.
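In symbols, the composite loss has the generic form below (a sketch of the standard formulation, not taken from the original example; $\lambda$ is a weighting hyperparameter, $f$ the equation residual, and $(x_i, u_i)$ the data points):

$$
\mathcal{L}(\theta) = \underbrace{\frac{1}{N}\sum_{i=1}^{N}\bigl(u_\theta(x_i)-u_i\bigr)^2}_{\text{data loss}} \;+\; \lambda\,\underbrace{\frac{1}{M}\sum_{j=1}^{M} f\bigl(x_j,\,u_\theta(x_j),\,\partial_x u_\theta(x_j)\bigr)^2}_{\text{physics residual}}
$$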

A small example adapted from the web

import torch
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim

# Define a simple PDE-style residual: f(x, u, u_x) = -e^u * u_x - sin(pi*x)
def true_solution(x):
    return np.sin(np.pi * x)

def pde(x, u, du_dx):
    # expects torch tensors: torch.exp / torch.sin reject plain numpy arrays
    return -torch.exp(u) * du_dx - torch.sin(np.pi * x)

# Generate training data on [0, 1]
x_train = np.random.rand(100, 1)
u_true = true_solution(x_train)
du_dx_true = np.pi * np.cos(np.pi * x_train)

# Convert to tensors before calling pde(): it uses torch ops, which fail
# on numpy arrays. requires_grad_ lets autograd later compute du/dx.
x_train = torch.from_numpy(x_train).float().requires_grad_(True)
u_true = torch.from_numpy(u_true).float()
du_dx_true = torch.from_numpy(du_dx_true).float()
f_true = pde(x_train, u_true, du_dx_true).detach()

# Build the PINN model: a small fully connected network for u(x)
class PINN(nn.Module):
    def __init__(self):
        super(PINN, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(1, 50),
            nn.Tanh(),
            nn.Linear(50, 50),
            nn.Tanh(),
            nn.Linear(50, 1),
        )
    
    def forward(self, x):
        return self.net(x)

# Train the PINN model
model = PINN()
optimizer = optim.Adam(model.parameters(), lr=0.001)
criterion = nn.MSELoss()

epochs = 5000
for epoch in range(epochs):
    optimizer.zero_grad()
    u_pred = model(x_train)
    # du/dx by automatic differentiation; create_graph=True keeps the
    # derivative differentiable so loss.backward() can reach it
    du_dx_pred = torch.autograd.grad(u_pred, x_train,
                                     grad_outputs=torch.ones_like(u_pred),
                                     create_graph=True)[0]
    f_pred = pde(x_train, u_pred, du_dx_pred)
    # total loss = data-fit term + physics (equation residual) term
    loss = criterion(u_pred, u_true) + criterion(f_pred, f_true)
    loss.backward()
    optimizer.step()

    if epoch % 100 == 0:
        print(f"Epoch [{epoch}/{epochs}], Loss: {loss.item()}")

# Visualize results: keep a numpy copy of x for plotting and for the
# numpy-based true_solution; feed the tensor copy to the model
x_test = np.linspace(0, 1, 100).reshape(-1, 1)
x_test_t = torch.from_numpy(x_test).float()
u_pred = model(x_test_t).detach().numpy()

plt.figure(figsize=(12, 5))
plt.plot(x_test, u_pred, label='Predicted')
plt.plot(x_test, true_solution(x_test), label='True')
plt.xlabel('x')
plt.ylabel('u')
plt.legend()
plt.show()
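
For a higher-order PDE the same autograd trick nests: differentiating the first derivative once more gives u_xx. A minimal sketch, assuming the model and x_train defined above (the variable names here are illustrative, not part of the original example):

# second derivative via nested autograd.grad; create_graph=True on the
# first call keeps that graph alive so it can be differentiated again
u = model(x_train)
du_dx = torch.autograd.grad(u, x_train,
                            grad_outputs=torch.ones_like(u),
                            create_graph=True)[0]
d2u_dx2 = torch.autograd.grad(du_dx, x_train,
                              grad_outputs=torch.ones_like(du_dx),
                              create_graph=True)[0]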