Understanding Neural Networks, Part 3: Neural Networks and Artificial Intelligence
Key Differences
1. Architecture
A traditional RNN:
import tensorflow as tf

def create_rnn():
    return tf.keras.Sequential([
        tf.keras.layers.LSTM(64, return_sequences=True),  # emit the full sequence
        tf.keras.layers.LSTM(32),                         # keep only the final state
        tf.keras.layers.Dense(10)
    ])
A simple Transformer:
def create_transformer():
    # MultiHeadAttention takes separate query and value tensors, so it cannot
    # be stacked inside a Sequential model; the functional API is used instead
    inputs = tf.keras.Input(shape=(None, 64))
    attn = tf.keras.layers.MultiHeadAttention(num_heads=8, key_dim=64)(inputs, inputs)
    x = tf.keras.layers.LayerNormalization()(inputs + attn)  # residual + norm
    x = tf.keras.layers.Dense(64, activation='relu')(x)
    outputs = tf.keras.layers.Dense(10)(x)
    return tf.keras.Model(inputs, outputs)
2. Processing
RNNs: sequential processing, one timestep at a time.
Transformers: parallel processing of the whole sequence via attention mechanisms (a minimal attention sketch follows this list).
3. Use Cases
Conventional neural networks (CNNs, RNNs): traditional machine-learning tasks, computer vision.
Transformers: NLP, large language models.
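To make the contrast concrete, here is a minimal sketch of scaled dot-product attention, the operation that lets a Transformer attend to every position of a sequence at once. The batch size, sequence length, and feature width below are illustrative assumptions, not values from a specific model.
import tensorflow as tf

def scaled_dot_product_attention(q, k, v):
    # Pairwise scores between all positions, computed in a single matmul
    scores = tf.matmul(q, k, transpose_b=True)
    scores /= tf.math.sqrt(tf.cast(tf.shape(k)[-1], tf.float32))
    weights = tf.nn.softmax(scores, axis=-1)
    return tf.matmul(weights, v)

# Toy self-attention: 2 sequences, 10 tokens, 64 features, processed in parallel
x = tf.random.normal((2, 10, 64))
out = scaled_dot_product_attention(x, x, x)
print(out.shape)  # (2, 10, 64)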
Practical Implementation Tips
1. Model Selection
def choose_model(task_type, input_shape):
    if task_type == 'image':
        return create_cnn()       # convolutional model for image data
    elif task_type == 'sequence':
        return create_rnn()       # recurrent model for sequential data
    else:
        return create_basic_nn()  # plain feed-forward network otherwise
2. Hyperparameter Tuning
from keras_tuner import RandomSearch

def tune_hyperparameters(model_builder, x_train, y_train):
    tuner = RandomSearch(
        model_builder,
        objective='val_accuracy',
        max_trials=5
    )
    tuner.search(x_train, y_train,
                 epochs=5,
                 validation_split=0.2)
    return tuner.get_best_hyperparameters()[0]
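A minimal builder to pass into the tuner might look like the sketch below; the layer sizes, search ranges, and the x_train/y_train arrays are illustrative assumptions. Keras Tuner calls the builder with an `hp` object used to sample each hyperparameter.
import tensorflow as tf

def build_model(hp):
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(hp.Int('units', min_value=32, max_value=256, step=32),
                              activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(
                      hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

best_hp = tune_hyperparameters(build_model, x_train, y_train)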
Real-World Case Studies
1. Medical Image Analysis
Example: COVID-19 X-ray classification:
def create_medical_cnn():
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
                               input_shape=(224, 224, 3)),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(2, activation='softmax')
    ])
    return model
Custom data generator with augmentation:
def create_medical_data_generator():
    return tf.keras.preprocessing.image.ImageDataGenerator(
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True,
        validation_split=0.2,
        preprocessing_function=tf.keras.applications.resnet50.preprocess_input
    )
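One way to wire the model and generator together is sketched below; the directory layout (one subdirectory per class), batch size, and epoch count are assumptions for illustration. The `subset` arguments work because the generator sets validation_split=0.2.
model = create_medical_cnn()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # matches the 2-unit softmax head
              metrics=['accuracy'])

datagen = create_medical_data_generator()
# Assumes images sorted into one subdirectory per class, e.g. data/covid, data/normal
train_gen = datagen.flow_from_directory('data/', target_size=(224, 224),
                                        batch_size=32, subset='training')
val_gen = datagen.flow_from_directory('data/', target_size=(224, 224),
                                      batch_size=32, subset='validation')
model.fit(train_gen, validation_data=val_gen, epochs=10)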
2. Financial Time-Series Forecasting
Example: stock price prediction:
def create_financial_lstm():
    model = tf.keras.Sequential([
        # 60 timesteps of 5 features (OHLCV) per sample
        tf.keras.layers.LSTM(50, return_sequences=True, input_shape=(60, 5)),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.LSTM(50, return_sequences=False),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(1)
    ])
    return model
Feature engineering for financial data:
import numpy as np
from sklearn.preprocessing import MinMaxScaler

def prepare_financial_data(df, look_back=60):
    features = ['Open', 'High', 'Low', 'Close', 'Volume']
    scaler = MinMaxScaler()
    scaled_data = scaler.fit_transform(df[features])
    X, y = [], []
    for i in range(look_back, len(scaled_data)):
        X.append(scaled_data[i-look_back:i])  # trailing look_back-day window
        y.append(scaled_data[i, 3])           # predicting the Close price
    return np.array(X), np.array(y), scaler
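Putting the two pieces together might look like the sketch below; the CSV file name is hypothetical, and the data is assumed to be OHLCV history ordered oldest-first. Note the chronological train/validation split, which avoids leaking future prices into training.
import pandas as pd

df = pd.read_csv('stock_prices.csv')  # assumed OHLCV history, oldest row first
X, y, scaler = prepare_financial_data(df)

split = int(len(X) * 0.8)  # keep the split chronological for time series
model = create_financial_lstm()
model.compile(optimizer='adam', loss='mse')
model.fit(X[:split], y[:split],
          validation_data=(X[split:], y[split:]),
          epochs=20, batch_size=32)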
Model Deployment Guide
1. Model Optimization
Quantization:
def quantize_model(model):
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_types = [tf.float16]
    tflite_model = converter.convert()
    return tflite_model
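The converter returns the model as a byte string; a typical next step is to write it to disk for the TFLite runtime (the file name here is arbitrary):
tflite_model = quantize_model(model)
with open('model_fp16.tflite', 'wb') as f:
    f.write(tflite_model)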
Pruning:
import numpy as np
import tensorflow_model_optimization as tfmot

def create_pruned_model(model, training_data, batch_size=32, epochs=5):
    # Schedule pruning so it finishes on the last training step
    end_step = int(np.ceil(len(training_data) / batch_size)) * epochs
    pruning_params = {
        'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(
            initial_sparsity=0.30,  # start with 30% of weights zeroed
            final_sparsity=0.80,    # end with 80% of weights zeroed
            begin_step=0,
            end_step=end_step)
    }
    model_pruned = tfmot.sparsity.keras.prune_low_magnitude(
        model, **pruning_params)
    return model_pruned
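Pruned models must be trained (or fine-tuned) with the UpdatePruningStep callback so the sparsity schedule actually advances, and strip_pruning removes the pruning wrappers before export. A minimal sketch, assuming x_train/y_train arrays with integer labels:
pruned = create_pruned_model(model, x_train, batch_size=32, epochs=5)
pruned.compile(optimizer='adam',
               loss='sparse_categorical_crossentropy',
               metrics=['accuracy'])
pruned.fit(x_train, y_train, batch_size=32, epochs=5,
           callbacks=[tfmot.sparsity.keras.UpdatePruningStep()])
final_model = tfmot.sparsity.keras.strip_pruning(pruned)  # drop pruning wrappers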
2. Production Deployment
Flask API for model serving:
from flask import Flask, request, jsonify
import tensorflow as tf

app = Flask(__name__)
model = None

def load_model():
    global model
    model = tf.keras.models.load_model('path/to/model')

@app.route('/predict', methods=['POST'])
def predict():
    data = request.json['data']
    processed_data = preprocess_input(data)  # your own preprocessing step
    prediction = model.predict(processed_data)
    return jsonify({'prediction': prediction.tolist()})

if __name__ == '__main__':
    load_model()
    app.run(host='0.0.0.0', port=5000)
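A quick client-side check of the endpoint, assuming the server above is running locally; the payload shape is a placeholder and depends on what your preprocess_input expects:
import requests

resp = requests.post('http://localhost:5000/predict',
                     json={'data': [[0.1, 0.2, 0.3, 0.4]]})
print(resp.json()['prediction'])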
Dockerfile:
FROM python:3.8-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . .
CMD ["python", "app.py"]
Recent Transformer Innovations
1. Vision Transformers (ViT)
def create_vit_model(input_shape, num_classes):
    inputs = tf.keras.Input(shape=input_shape)
    # Patch embedding
    patches = tf.keras.layers.Conv2D(filters=768, kernel_size=16, strides=16)(inputs)
    flat_patches = tf.keras.layers.Reshape((-1, 768))(patches)
    # Position embedding
    positions = tf.range(start=0, limit=flat_patches.shape[1], delta=1)
    pos_embedding = tf.keras.layers.Embedding(input_dim=flat_patches.shape[1],
                                              output_dim=768)(positions)
    x = flat_patches + pos_embedding
    # Transformer blocks
    for _ in range(12):
        x = transformer_block(x)
    x = tf.keras.layers.GlobalAveragePooling1D()(x)
    outputs = tf.keras.layers.Dense(num_classes, activation='softmax')(x)
    return tf.keras.Model(inputs=inputs, outputs=outputs)
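The transformer_block helper is not defined above; a minimal pre-norm version consistent with the 768-wide embeddings in the loop might look like this (the head count and MLP width follow typical ViT-Base settings but are assumptions here):
def transformer_block(x, num_heads=12, mlp_dim=3072, dim=768):
    # Multi-head self-attention with a residual connection (pre-norm)
    y = tf.keras.layers.LayerNormalization()(x)
    y = tf.keras.layers.MultiHeadAttention(num_heads=num_heads,
                                           key_dim=dim // num_heads)(y, y)
    x = x + y
    # Position-wise MLP with a residual connection
    y = tf.keras.layers.LayerNormalization()(x)
    y = tf.keras.layers.Dense(mlp_dim, activation='gelu')(y)
    y = tf.keras.layers.Dense(dim)(y)
    return x + y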
2. MLP-Mixer Architecture
def mlp_block(x, hidden_units, dropout_rate):
    x = tf.keras.layers.Dense(hidden_units, activation='gelu')(x)
    x = tf.keras.layers.Dense(x.shape[-1])(x)
    x = tf.keras.layers.Dropout(dropout_rate)(x)
    return x

def mixer_block(x, tokens_mlp_dim, channels_mlp_dim, dropout_rate):
    # Token-mixing
    y = tf.keras.layers.LayerNormalization()(x)
    y = tf.transpose(y, perm=[0, 2, 1])
    y = mlp_block(y, tokens_mlp_dim, dropout_rate)
    y = tf.transpose(y, perm=[0, 2, 1])
    x = x + y
    # Channel-mixing
    y = tf.keras.layers.LayerNormalization()(x)
    y = mlp_block(y, channels_mlp_dim, dropout_rate)
    return x + y
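A sketch of assembling these blocks into a full classifier; the patch size, depth, and widths follow typical Mixer-S settings but are assumptions here, not values from the text above:
def create_mlp_mixer(input_shape, num_classes, num_blocks=8,
                     patch_size=16, hidden_dim=512,
                     tokens_mlp_dim=256, channels_mlp_dim=2048):
    inputs = tf.keras.Input(shape=input_shape)
    # Per-patch linear embedding, implemented as a strided convolution
    x = tf.keras.layers.Conv2D(hidden_dim, patch_size, strides=patch_size)(inputs)
    x = tf.keras.layers.Reshape((-1, hidden_dim))(x)
    for _ in range(num_blocks):
        x = mixer_block(x, tokens_mlp_dim, channels_mlp_dim, dropout_rate=0.1)
    x = tf.keras.layers.LayerNormalization()(x)
    x = tf.keras.layers.GlobalAveragePooling1D()(x)
    outputs = tf.keras.layers.Dense(num_classes, activation='softmax')(x)
    return tf.keras.Model(inputs, outputs)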
Performance Optimization Tips
1. Memory Management
Custom data generator for large datasets:
import numpy as np
import tensorflow as tf

class DataGenerator(tf.keras.utils.Sequence):
    def __init__(self, x_set, y_set, batch_size):
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size

    def __len__(self):
        # Number of batches per epoch
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        # Materialize only the idx-th batch
        batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
        return np.array(batch_x), np.array(batch_y)
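Keras accepts the generator directly in fit, so only one batch is held in memory at a time (model, x_train, and y_train are assumed to be defined already):
train_gen = DataGenerator(x_train, y_train, batch_size=32)
model.fit(train_gen, epochs=10)  # batches are built lazily via __getitem__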
2. Training Optimization
Mixed-precision training:
def enable_mixed_precision():
    policy = tf.keras.mixed_precision.Policy('mixed_float16')
    tf.keras.mixed_precision.set_global_policy(policy)
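One caveat from the TensorFlow mixed-precision guide: keep the final activation in float32 for numerical stability. A minimal sketch with assumed layer sizes:
enable_mixed_precision()
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10),
    # Cast the output back to float32 so the softmax stays numerically stable
    tf.keras.layers.Activation('softmax', dtype='float32')
])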
Custom training loop with gradient accumulation:
def train_with_gradient_accumulation(model, dataset, accumulation_steps=4):
    optimizer = tf.keras.optimizers.Adam()
    gradients = [tf.zeros_like(v) for v in model.trainable_variables]
    for step, (x_batch, y_batch) in enumerate(dataset):
        with tf.GradientTape() as tape:
            predictions = model(x_batch, training=True)
            loss = compute_loss(y_batch, predictions)  # your loss function
            loss = loss / accumulation_steps  # so the accumulated sum averages out
        grads = tape.gradient(loss, model.trainable_variables)
        gradients = [acc_grad + grad for acc_grad, grad in zip(gradients, grads)]
        if (step + 1) % accumulation_steps == 0:
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))
            gradients = [tf.zeros_like(v) for v in model.trainable_variables]
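With accumulation_steps=4 and, say, 32-sample batches, the optimizer effectively steps with a 128-sample batch while peak memory stays at the single-batch level; dividing the loss by accumulation_steps keeps the accumulated gradient equal to the average over that larger batch.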
Additional Resources
1. Online Learning Platforms
Coursera: Deep Learning Specialization
Fast.ai: Practical Deep Learning
TensorFlow official tutorials
2. Books
Deep Learning by Ian Goodfellow
Neural Networks and Deep Learning by Michael Nielsen
3. Practice Platforms
Kaggle: real-world datasets and competitions
Google Colab: free GPU access
TensorFlow Playground: interactive visualizations
Conclusion
Neural networks are powerful tools that continue to evolve. This guide provides the foundations, but the field is advancing rapidly. Keep experimenting, stay curious, and remember that hands-on experience is the best teacher.
Here are a few tips for success:
Start with simple architectures.
Understand your data thoroughly.
Monitor training metrics carefully.
Use version control for your models.
Keep up with the latest research.
Remember: the best way to learn is to implement and experiment with different architectures and datasets.