I am trying to train a network (an LRCN, i.e. a CNN followed by an LSTM) using TensorFlow, like this:
model = Sequential()
# ... my model layers ...
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit_generator(generator=training_generator,
                    validation_data=validation_generator,
                    use_multiprocessing=True,
                    workers=6)
I am following this link to create the generator class. It looks like this:
import numpy as np
import h5py
import tensorflow as tf

class DataGenerator(tf.keras.utils.Sequence):
    'Generates data for Keras'
    def __init__(self, list_ids, labels, batch_size=8, dim=(15, 16, 3200),
                 n_channels=1, n_classes=3, shuffle=True):
        'Initialization'
        self.dim = dim
        self.batch_size = batch_size
        self.labels = labels
        self.list_IDs = list_ids
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        # Find list of IDs
        list_ids_temp = [self.list_IDs[k] for k in indexes]
        # Generate data
        X, y = self.__data_generation(list_ids_temp)
        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_ids_temp):
        'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.empty((self.batch_size, *self.dim, self.n_channels))
        y = np.empty(self.batch_size, dtype=int)
        sequences = np.empty((15, 16, 3200, self.n_channels))

        # Generate data
        for i, ID in enumerate(list_ids_temp):
            with h5py.File(ID, 'r') as file:
                _data = list(file['decimated_data'])
            _npData = np.array(_data)
            _allSequences = np.transpose(_npData)
            # a 16 x 48000 matrix is split into 15 sequences of size 16 x 3200
            for sq in range(15):
                sequences[sq, :, :, :] = np.reshape(
                    _allSequences[0:16, sq * 3200:(sq + 1) * 3200], (16, 3200, 1))
            # Store sample
            X[i, ] = sequences
            # Store class
            y[i] = self.labels[ID]
        return X, tf.keras.utils.to_categorical(y, num_classes=self.n_classes)
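For reference, this is roughly how the generators passed to fit_generator() are created (the partition dictionary and parameter names here are illustrative, following the linked example, not my exact code):

params = {'dim': (15, 16, 3200),
          'batch_size': 8,
          'n_classes': 3,
          'n_channels': 1,
          'shuffle': True}

# partition['train'] / partition['validation'] hold the lists of HDF5 file IDs,
# and labels maps each file ID to its class
training_generator = DataGenerator(partition['train'], labels, **params)
validation_generator = DataGenerator(partition['validation'], labels, **params)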
This works fine and the code runs, but I noticed that GPU usage stays at 0%. When I set log_device_placement to true, it shows ops being assigned to the GPU, yet when I monitor the GPU using Task Manager or nvidia-smi, I see no activity.
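For reference, this is how I enable the device placement logging (a minimal sketch of the standard TF 1.x session setup, not my exact code):

import tensorflow as tf

# Log every op's device assignment and attach the session to Keras
config = tf.ConfigProto(log_device_placement=True)
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)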
However, when I skip the DataGenerator class and just call model.fit() on randomly generated data like the code below, I notice that the program does use the GPU:
data = np.random.random((550, num_seq, rows, cols, ch))
label = np.random.random((550, 1))

_data['train'] = data[0:500, :]
_label['train'] = label[0:500, :]
_data['valid'] = data[500:, :]
_label['valid'] = label[500:, :]

model.fit(_data['train'],
          _label['train'],
          epochs=FLAGS.epochs,
          batch_size=FLAGS.batch_size,
          validation_data=(_data['valid'], _label['valid']),
          shuffle=True,
          callbacks=[tb, early_stopper, checkpoint])
So I am guessing this cannot be due to a bad NVIDIA driver installation or an incorrect TensorFlow install: the message I get when running both versions of the code indicates that TF recognizes my GPU. That leads me to believe the problem is with my DataGenerator class and/or with fit_generator().
Can anyone help me point out what I am doing wrong?
I am using TensorFlow 1.10 and CUDA 9 on a Windows 10 machine with a GTX 1050Ti.
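For completeness, this is the kind of check that confirms TF sees the GPU (a minimal TF 1.x sketch; the exact message from my run is not shown here):

import tensorflow as tf
from tensorflow.python.client import device_lib

# Should list a /device:GPU:0 entry for the GTX 1050Ti if TF can use it
print(device_lib.list_local_devices())
print(tf.test.is_gpu_available())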