How to Use the Case Western Reserve University (CWRU) Bearing Fault Dataset for Fault Diagnosis and Classification (with Code): Interpreting the CWRU Dataset, Bearing Data Preprocessing and Dataset Construction, and a Python-Based Research Approach

Full Fault Diagnosis Code Coverage (CWRU Dataset)
Complete fault diagnosis code

import numpy as np
import scipy.io
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F

# Step 1: Load and preprocess data
def load_cwru_data(file_path, key='X096_DE_time'):
    # Each CWRU .mat file stores its signal under a variable named
    # X<file number>_DE_time (drive-end accelerometer), so pass the key
    # that matches the file; the default is the 0 HP normal example
    mat = scipy.io.loadmat(file_path)
    data = mat[key]
    return data.flatten()

def create_segments_and_labels(X, y, time_steps=1024, step=1024):
    segments = []
    labels = []
    for i in range(0, len(X) - time_steps, step):
        segments.append(X[i:i+time_steps])
        labels.append(y)
    return np.asarray(segments), np.asarray(labels)

# Load normal data
normal_data = load_cwru_data('CWRU_Data/0_hp/normal_0_deg.mat')
scaler = StandardScaler()
normal_data_scaled = scaler.fit_transform(normal_data.reshape(-1, 1)).flatten()

# Create dataset for normal condition
X_normal, y_normal = create_segments_and_labels(normal_data_scaled, 0, 1024, 1024)

# Load inner race fault data
inner_fault_data = load_cwru_data('CWRU_Data/0_hp/inner_race_07_0_deg.mat')
inner_fault_data_scaled = scaler.fit_transform(inner_fault_data.reshape(-1, 1)).flatten()

# Create dataset for inner race fault condition
X_inner, y_inner = create_segments_and_labels(inner_fault_data_scaled, 1, 1024, 1024)

# Combine datasets
X = np.concatenate((X_normal, X_inner))
y = np.concatenate((y_normal, y_inner))

# Shuffle the dataset
indices = np.arange(len(X))
np.random.shuffle(indices)
X = X[indices]
y = y[indices]

# Step 2: Define dataset and dataloader
class CWRUDataset(Dataset):
    def __init__(self, X, y):
        self.X = torch.tensor(X, dtype=torch.float32).unsqueeze(1)  # Add channel dimension
        self.y = torch.tensor(y, dtype=torch.long)
    
    def __len__(self):
        return len(self.X)
    
    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]

train_dataset = CWRUDataset(X, y)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)

# Step 3: Define the model
class SimpleCNN(nn.Module):
    def __init__(self, num_classes=8):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool1d(kernel_size=2, stride=2, padding=0)
        self.conv2 = nn.Conv1d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.fc1 = nn.Linear(128 * 256, 512)  # 128 channels * 256 steps: 1024-sample input halved by two pooling layers
        self.fc2 = nn.Linear(512, num_classes)
    
    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 128 * 256)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

model = SimpleCNN(num_classes=8)  # only labels 0 and 1 occur in this demo; set num_classes to match your label set

# Step 4: Define loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Step 5: Training loop
num_epochs = 50

for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    
    for inputs, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        
        running_loss += loss.item()
    
    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {running_loss/len(train_loader):.4f}')

# Step 6: Evaluate the model
def evaluate_model(model, dataloader):
    model.eval()
    correct = 0
    total = 0
    
    with torch.no_grad():
        for inputs, labels in dataloader:
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    
    accuracy = 100 * correct / total
    return accuracy

# Note: this measures accuracy on the training data itself; use a
# held-out test split for a realistic estimate
accuracy = evaluate_model(model, train_loader)
print(f'Accuracy: {accuracy:.2f}%')
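
For reproducible shuffling and weight initialization across runs, it can help to fix the random seeds near the top of the script; an optional addition, not part of the original code:

# Fix the two random sources this script uses (numpy shuffling, torch init)
np.random.seed(42)
torch.manual_seed(42)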




Contents: a detailed interpretation of the CWRU dataset, bearing data preprocessing and dataset construction, and a Python-based research approach to fault diagnosis and classification.
PyTorch framework.
The code works with various model combinations, comes with a walkthrough of the material, runs out of the box for learning purposes, and can be adapted to your own dataset.


Let's go through in detail how to use the Case Western Reserve University (CWRU) bearing fault dataset for fault diagnosis and classification. We will cover the following:

  1. A detailed interpretation of the CWRU dataset
  2. Bearing data preprocessing
  3. Dataset construction
  4. A Python-based research approach to fault diagnosis and classification
  5. Model training and evaluation with the PyTorch framework

1. A Detailed Interpretation of the CWRU Dataset

Dataset overview
  • Source: Case Western Reserve University Bearing Data Center
  • Content: rolling-bearing vibration data under multiple operating conditions, covering normal operation, different fault types (inner race, outer race, ball) and different damage severities.
  • Sensor: accelerometer
  • Sampling frequency: 12 kHz
  • File format: .mat files

Dataset structure
  • Conditions:
    • Normal
    • Inner race fault
    • Outer race fault
    • Ball fault
  • Damage severity:
    • 0.007 in
    • 0.014 in
    • 0.021 in
  • Load conditions:
    • 0 HP (motor speed 1797 rpm)
    • 1 HP (1772 rpm)
    • 2 HP (1750 rpm)
    • 3 HP (1730 rpm)
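
Since the variable names inside the .mat files differ from file to file (X096_DE_time in the code below is just one example), it is worth listing a file's keys before loading it. A minimal sketch; the path is a placeholder for one of your own files:

import scipy.io

mat = scipy.io.loadmat('CWRU_Data/0_hp/normal_0_deg.mat')

# Keys starting with '__' are MATLAB metadata; the remaining entries are
# the recorded signals, typically *_DE_time (drive end), *_FE_time (fan
# end) and an RPM value
for key, value in mat.items():
    if not key.startswith('__'):
        print(key, getattr(value, 'shape', None))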
2. Bearing Data Preprocessing

Import libraries
    import numpy as np
    import scipy.io
    import matplotlib.pyplot as plt
    from sklearn.preprocessing import StandardScaler
    
Load data
    def load_cwru_data(file_path, key='X096_DE_time'):
        # Each CWRU .mat file stores its signal under a variable named
        # X<file number>_DE_time (drive-end accelerometer), so pass the key
        # that matches the file; the default is the 0 HP normal example
        mat = scipy.io.loadmat(file_path)
        data = mat[key]
        return data.flatten()
    
Visualize the data
    data = load_cwru_data('CWRU_Data/0_hp/normal_0_deg.mat')
    plt.figure(figsize=(12, 6))
    plt.plot(data[:1000])
    plt.title('Raw Vibration Signal (First 1000 Samples)')
    plt.xlabel('Sample Index')
    plt.ylabel('Amplitude')
    plt.show()
    
Standardize the data
    scaler = StandardScaler()
    data_scaled = scaler.fit_transform(data.reshape(-1, 1)).flatten()
    

3. Dataset Construction

Segment the signal into windows
    def create_segments_and_labels(X, y, time_steps=1024, step=1024):
        segments = []
        labels = []
        for i in range(0, len(X) - time_steps, step):
            segments.append(X[i:i+time_steps])
            labels.append(y)
        return np.asarray(segments), np.asarray(labels)
    
Build the datasets
    # Example: dataset for the normal condition
    normal_data = load_cwru_data('CWRU_Data/0_hp/normal_0_deg.mat')
    normal_data_scaled = scaler.fit_transform(normal_data.reshape(-1, 1)).flatten()
    
    time_steps = 1024
    step = 1024
    
    X_normal, y_normal = create_segments_and_labels(normal_data_scaled, 0, time_steps, step)
    
    # Example: dataset for the inner race fault condition
    inner_fault_data = load_cwru_data('CWRU_Data/0_hp/inner_race_07_0_deg.mat')
    inner_fault_data_scaled = scaler.fit_transform(inner_fault_data.reshape(-1, 1)).flatten()
    
    X_inner, y_inner = create_segments_and_labels(inner_fault_data_scaled, 1, time_steps, step)
    
    # Combine the datasets
    X = np.concatenate((X_normal, X_inner))
    y = np.concatenate((y_normal, y_inner))
    
    # Shuffle the dataset
    indices = np.arange(len(X))
    np.random.shuffle(indices)
    X = X[indices]
    y = y[indices]
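
The demo above trains and evaluates on the same data. For an honest accuracy estimate, hold out part of the segments for testing; a minimal sketch using scikit-learn (already a dependency via StandardScaler):

    from sklearn.model_selection import train_test_split

    # Stratify so both classes keep the same proportions in each split
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, stratify=y, random_state=42)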
    

4. A Python-Based Research Approach to Fault Diagnosis and Classification

Model selection

Many deep-learning models can be used for fault diagnosis, for example convolutional neural networks (CNNs) or long short-term memory networks (LSTMs); see the sketch below.
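
As a point of comparison with the CNN used in this article, a minimal LSTM classifier for the same segments might look like the following sketch (not part of the original code; it expects the same (batch, 1, 1024) inputs the CNN receives):

    import torch
    import torch.nn as nn

    class SimpleLSTM(nn.Module):
        def __init__(self, num_classes=2, hidden_size=64):
            super(SimpleLSTM, self).__init__()
            # batch_first=True: inputs are (batch, seq_len, features)
            self.lstm = nn.LSTM(input_size=1, hidden_size=hidden_size, batch_first=True)
            self.fc = nn.Linear(hidden_size, num_classes)

        def forward(self, x):
            # (batch, 1, 1024) -> (batch, 1024, 1): one feature per time step
            x = x.permute(0, 2, 1)
            _, (h_n, _) = self.lstm(x)
            return self.fc(h_n[-1])  # classify from the final hidden state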

Training workflow
  1. Data loading
  2. Model definition
  3. Loss function and optimizer
  4. Training loop
  5. Evaluation

5. Model Training and Evaluation with PyTorch

Install dependencies
    pip install torch torchvision torchaudio
    
Data loader
    import torch
    from torch.utils.data import Dataset, DataLoader
    
    class CWRUDataset(Dataset):
        def __init__(self, X, y):
            self.X = torch.tensor(X, dtype=torch.float32).unsqueeze(1)  # Add channel dimension
            self.y = torch.tensor(y, dtype=torch.long)
        
        def __len__(self):
            return len(self.X)
        
        def __getitem__(self, idx):
            return self.X[idx], self.y[idx]
    
    train_dataset = CWRUDataset(X, y)
    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
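
Before defining the model, a quick sanity check on the batch shapes (an optional addition, not in the original):

    # One batch from the loader: inputs should be (32, 1, 1024), labels (32,)
    inputs, labels = next(iter(train_loader))
    print(inputs.shape, labels.shape)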
    
Model definition
    import torch.nn as nn
    import torch.nn.functional as F
    
    class SimpleCNN(nn.Module):
        def __init__(self, num_classes=8):
            super(SimpleCNN, self).__init__()
            self.conv1 = nn.Conv1d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1)
            self.pool = nn.MaxPool1d(kernel_size=2, stride=2, padding=0)
            self.conv2 = nn.Conv1d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1)
            self.fc1 = nn.Linear(128 * 256, 512)  # 128 channels * 256 steps: 1024-sample input halved by two pooling layers
            self.fc2 = nn.Linear(512, num_classes)
        
        def forward(self, x):
            x = self.pool(F.relu(self.conv1(x)))
            x = self.pool(F.relu(self.conv2(x)))
            x = x.view(-1, 128 * 256)
            x = F.relu(self.fc1(x))
            x = self.fc2(x)
            return x
    
    model = SimpleCNN(num_classes=8)  # only labels 0 and 1 occur in this demo; set num_classes to match your label set
    
Loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    
Training loop
    num_epochs = 50
    
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        
        for inputs, labels in train_loader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            
            running_loss += loss.item()
        
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {running_loss/len(train_loader):.4f}')
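
After training, the weights can be persisted with PyTorch's standard state_dict serialization; a small sketch with a placeholder file name:

    # Save the trained weights (the architecture is re-created from code on load)
    torch.save(model.state_dict(), 'simple_cnn_cwru.pt')

    # To restore later: rebuild the architecture, then load the weights
    restored = SimpleCNN(num_classes=8)
    restored.load_state_dict(torch.load('simple_cnn_cwru.pt'))
    restored.eval()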
    
Evaluate the model
    def evaluate_model(model, dataloader):
        model.eval()
        correct = 0
        total = 0
        
        with torch.no_grad():
            for inputs, labels in dataloader:
                outputs = model(inputs)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        
        accuracy = 100 * correct / total
        return accuracy
    
    accuracy = evaluate_model(model, train_loader)
    print(f'Accuracy: {accuracy:.2f}%')
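
Overall accuracy can hide per-class errors. A sketch of a class-by-class breakdown with scikit-learn's confusion_matrix, run here on the training loader as in the demo (substitute a held-out loader if you created one):

    from sklearn.metrics import confusion_matrix

    # Collect predictions and true labels across the whole loader
    model.eval()
    all_preds, all_labels = [], []
    with torch.no_grad():
        for inputs, labels in train_loader:
            preds = model(inputs).argmax(dim=1)
            all_preds.extend(preds.tolist())
            all_labels.extend(labels.tolist())

    # Rows are true classes, columns are predicted classes
    print(confusion_matrix(all_labels, all_preds))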
    

Complete code example

The complete listing at the top of this article brings together data loading, preprocessing, model definition, training, and evaluation.

Walkthrough notes

Dataset directory layout
    CWRU_Data/
    ├── 0_hp/
    │   ├── normal_0_deg.mat
    │   ├── inner_race_07_0_deg.mat
    │   └── ...
    ├── 1_hp/
    │   ├── normal_0_deg.mat
    │   ├── inner_race_07_0_deg.mat
    │   └── ...
    └── ...
    
Data loading paths

Make sure the file paths passed to load_cwru_data point to your actual data files.
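
If your files follow the directory layout above, the whole dataset can be built in one loop instead of loading each file by hand. A sketch under the assumption that each file name maps to a known label and .mat variable key (both placeholders to adjust):

    import os

    # Hypothetical file -> (label, .mat key) mapping; edit to match your data
    file_label_key = {
        'normal_0_deg.mat': (0, 'X096_DE_time'),
        'inner_race_07_0_deg.mat': (1, 'X105_DE_time'),
    }

    segments, labels = [], []
    for fname, (label, key) in file_label_key.items():
        path = os.path.join('CWRU_Data', '0_hp', fname)
        signal = load_cwru_data(path, key=key)
        signal = scaler.fit_transform(signal.reshape(-1, 1)).flatten()
        Xs, ys = create_segments_and_labels(signal, label, 1024, 1024)
        segments.append(Xs)
        labels.append(ys)

    X = np.concatenate(segments)
    y = np.concatenate(labels)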

Using your own dataset

To swap in your own dataset, modify only the data-loading part, make sure the data format is consistent, and adjust the class labels. A simple example:

    # Custom data-loading function
    def load_custom_data(file_path):
        # Load the data according to your own file format
        data = np.loadtxt(file_path)
        return data.flatten()
    
    # Load the custom data
    custom_data = load_custom_data('path/to/custom_data.csv')
    custom_data_scaled = scaler.fit_transform(custom_data.reshape(-1, 1)).flatten()
    
    # Build the custom dataset (label 2 = a new class)
    X_custom, y_custom = create_segments_and_labels(custom_data_scaled, 2, 1024, 1024)
    
    # Merge with the existing dataset
    X = np.concatenate((X, X_custom))
    y = np.concatenate((y, y_custom))
    
    # Shuffle the combined dataset
    indices = np.arange(len(X))
    np.random.shuffle(indices)
    X = X[indices]
    y = y[indices]
    

By following these steps, you can use the CWRU bearing dataset for fault diagnosis and classification.

Author: 计算机C9硕士_算法工程师
