尝试腌制 ML 模型无法在 google colab 中腌制 _thread.RLock 对象 [英] trying to pickle ML model can't pickle _thread.RLock objects in google colab
问题描述
我正在 google colab 中使用 CNN 训练 MNIST 数据集,并想使用 pickle 保存模型,当我尝试保存模型时出现错误 can't pickle _thread.RLock objects>
我的代码
I am training on the MNIST dataset using a CNN in Google Colab and want to save the model using pickle, but when I try saving the model I get the error: can't pickle _thread.RLock objects.
import pickle
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D , MaxPooling2D, Dense, Flatten,Dropout
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
# --- Configuration and data preparation for the MNIST CNN (question code) ---
testRatio = 0.2  # NOTE(review): defined but never used below; mnist ships its own test split
valRatio = 0.2  # fraction of the 60k training images held out for validation
imageDimensions = (28,28,3)  # only [0] and [1] are used later; the channel count is hard-coded to 1
batchSizeVal = 50
EPOCHS = 2
stepsPerEpoch = 2000
# Load MNIST and carve a validation split out of the training set.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_validation , y_train , y_validation = train_test_split(X_train, y_train, test_size= valRatio)
# Add the trailing channel axis expected by Conv2D.
# NOTE(review): the hard-coded sizes assume valRatio == 0.2 (60000 -> 48000/12000);
# reshape((-1, 28, 28, 1)) would survive a ratio change.
X_train = X_train.reshape((48000, 28, 28, 1))
X_test = X_test.reshape((10000, 28, 28, 1))
X_validation = X_validation.reshape((12000, 28, 28, 1))
# Augmentation: small shifts, zoom, shear and rotation.
dataGen = ImageDataGenerator(width_shift_range = 0.1,
height_shift_range = 0.1,
zoom_range = 0.2,
shear_range = 0.1,
rotation_range= 10)
dataGen.fit(X_train)
# One-hot encode the 10 digit classes.
y_train = to_categorical(y_train,10)
y_test= to_categorical(y_test,10)
y_validation = to_categorical(y_validation,10)
def myModel():
"""Build and compile the MNIST CNN.

Architecture: two 5x5 conv layers (60 filters), max-pool, two 3x3 conv
layers (30 filters), max-pool, dropout, a 500-unit dense layer, dropout,
and a 10-way softmax output. Compiled with Adam and categorical
cross-entropy. Returns the compiled keras Sequential model.
"""
noOfFiters = 60  # filters in the first two conv layers; halved for the next two
sizeOfFilter1 = (5,5)
sizeOfFilter2 = (3,3)
sizeOfPool = (2,2)
noOfNode = 500  # units in the hidden dense layer
model = Sequential()
model.add((Conv2D(noOfFiters, sizeOfFilter1,input_shape=(imageDimensions[0]
,imageDimensions[1],
1),
activation = "relu")))
model.add((Conv2D(noOfFiters, sizeOfFilter1, activation = "relu")))
model.add(MaxPooling2D(pool_size=sizeOfPool))
model.add((Conv2D(noOfFiters//2, sizeOfFilter2, activation = "relu")))
model.add((Conv2D(noOfFiters//2, sizeOfFilter2, activation = "relu")))
model.add(MaxPooling2D(pool_size=sizeOfPool))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(noOfNode,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10,activation='softmax'))
model.compile(Adam(lr=0.001),loss='categorical_crossentropy',
metrics=['accuracy'])
return model
# Train with the augmented generator, plot learning curves, evaluate, then
# attempt to pickle the model (this last step is what fails).
model = myModel()
history = model.fit(dataGen.flow(X_train, y_train,
batch_size= batchSizeVal),
steps_per_epoch = stepsPerEpoch,
epochs =EPOCHS,
validation_data = (X_validation,y_validation),
shuffle= True)
# Loss curves: training vs. validation.
plt.figure(1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['training ', 'validation'])
plt.title("Loss")
plt.xlabel('epoch')
# Accuracy curves: training vs. validation.
plt.figure(2)
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['training ', 'validation'])
plt.title("Accuracy")
plt.xlabel('epoch')
plt.show()
# Evaluate on the untouched MNIST test split.
score = model.evaluate(X_test,y_test,verbose=0)
print("Test Score = ",score[0])
print("Test Accuracy = ",score[1])
# BUG(review): this is the failing step the question is about — Keras models
# contain _thread.RLock objects, so pickle.dump raises
# "can't pickle _thread.RLock objects". Note also that pickle.dump returns
# None, so the assignment below clobbers `model`; prefer a `with open(...)`
# block (or model.save) so the file handle is closed even on error.
pickle_out = open("model_trained.pickle","wb" )
model = pickle.dump(model,pickle_out)
pickle_out.close()
我该怎么做才能让它工作.我试图将运行时更改为 cpu,因为我认为这是由 gpu 引起的,但即使如此它也不起作用
What should I do to get it working? I tried changing the runtime to CPU because I thought the error was caused by the GPU, but even then it does not work.
推荐答案
Keras 不支持 Pickle 序列化其对象(模型).基本上,如果一个对象有 __getstate__
和 __setstate__
方法,pickle 将使用它们 来序列化对象.问题是 Keras 模型没有实现这些.
Keras doesn't support Pickle to serialize its objects (Models). Basically, if an object has __getstate__
and __setstate__
methods, pickle will use them to serialize the object. The problem is that Keras Model doesn't implement these.
@Zach Moshe,提出了解决此问题的修补程序.有关更多详细信息,请参阅他的博客.
@Zach Moshe proposed a hotfix for solving this issue. For more details, please refer to his blog.
# Hotfix function
def make_keras_picklable():
    """Monkey-patch keras ``Model`` so pickle can round-trip it via HDF5.

    pickle consults ``__getstate__``/``__setstate__`` when an object defines
    them; Keras models do not, so we inject implementations that serialize
    the model to HDF5 bytes and rebuild it with ``load_model``.
    """
    def serialize(self):
        # Save to a temporary .hdf5 file and capture its bytes as the state.
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as tmp:
            save_model(self, tmp.name, overwrite=True)
            payload = tmp.read()
        return {'model_str': payload}

    def deserialize(self, state):
        # Write the saved bytes back to disk, reload the model, and adopt
        # the reloaded model's attribute dict as this instance's own.
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as tmp:
            tmp.write(state['model_str'])
            tmp.flush()
            restored = load_model(tmp.name)
        self.__dict__ = restored.__dict__

    Model.__getstate__ = serialize
    Model.__setstate__ = deserialize

# Run the function
make_keras_picklable()
请参考下面的工作代码
import pickle
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import keras
from keras.datasets import mnist
from keras.utils import to_categorical
from tensorflow.keras.models import Sequential, load_model, save_model, Model
from keras.layers import Conv2D , MaxPooling2D, Dense, Flatten,Dropout
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
import tempfile
# --- Configuration and data preparation (working version) ---
testRatio = 0.2  # NOTE(review): defined but never used below
valRatio = 0.2  # fraction of the training set held out for validation
imageDimensions = (28,28,3)  # only height/width are used; channel count is hard-coded to 1
batchSizeVal = 50
EPOCHS = 2
stepsPerEpoch = 2000  # NOTE(review): unused here; fit() derives steps from X_train size
# Load MNIST and carve a validation split out of the 60k training images.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train, X_validation , y_train , y_validation = train_test_split(X_train, y_train, test_size= valRatio)
# Add the trailing channel axis; sizes assume valRatio == 0.2 (60000 -> 48000/12000).
X_train = X_train.reshape((48000, 28, 28, 1))
X_test = X_test.reshape((10000, 28, 28, 1))
X_validation = X_validation.reshape((12000, 28, 28, 1))
# Hotfix function
def make_keras_picklable():
    """Monkey-patch keras ``Model`` so pickle can round-trip it via HDF5.

    pickle consults ``__getstate__``/``__setstate__`` when an object defines
    them; Keras models do not, so we inject implementations that serialize
    the model to HDF5 bytes and rebuild it with ``load_model``.
    """
    def serialize(self):
        # Save to a temporary .hdf5 file and capture its bytes as the state.
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as tmp:
            save_model(self, tmp.name, overwrite=True)
            payload = tmp.read()
        return {'model_str': payload}

    def deserialize(self, state):
        # Write the saved bytes back to disk, reload the model, and adopt
        # the reloaded model's attribute dict as this instance's own.
        with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as tmp:
            tmp.write(state['model_str'])
            tmp.flush()
            restored = load_model(tmp.name)
        self.__dict__ = restored.__dict__

    Model.__getstate__ = serialize
    Model.__setstate__ = deserialize

# Run the function
make_keras_picklable()
# Augmentation pipeline: small shifts, zoom, shear and rotation.
dataGen = ImageDataGenerator(width_shift_range = 0.1,
height_shift_range = 0.1,
zoom_range = 0.2,
shear_range = 0.1,
rotation_range= 10)
dataGen.fit(X_train)
# One-hot encode the 10 digit classes.
y_train = to_categorical(y_train,10)
y_test= to_categorical(y_test,10)
y_validation = to_categorical(y_validation,10)
def myModel():
    """Build and compile the MNIST CNN.

    Architecture: two 5x5 conv layers (60 filters), max-pool, two 3x3 conv
    layers (30 filters), max-pool, dropout, a 500-unit dense layer, dropout,
    and a 10-way softmax output. Compiled with Adam and categorical
    cross-entropy. Returns the compiled keras Sequential model.
    """
    n_filters = 60          # filters in the first conv pair; halved afterwards
    kernel_large = (5, 5)
    kernel_small = (3, 3)
    pool_shape = (2, 2)
    dense_units = 500
    model = Sequential([
        Conv2D(n_filters, kernel_large,
               input_shape=(imageDimensions[0], imageDimensions[1], 1),
               activation="relu"),
        Conv2D(n_filters, kernel_large, activation="relu"),
        MaxPooling2D(pool_size=pool_shape),
        Conv2D(n_filters // 2, kernel_small, activation="relu"),
        Conv2D(n_filters // 2, kernel_small, activation="relu"),
        MaxPooling2D(pool_size=pool_shape),
        Dropout(0.5),
        Flatten(),
        Dense(dense_units, activation='relu'),
        Dropout(0.5),
        Dense(10, activation='softmax'),
    ])
    model.compile(Adam(lr=0.001), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
# Train with the augmented generator; steps_per_epoch covers the whole
# training set once per epoch instead of the question's fixed 2000 steps.
model = myModel()
history = model.fit(dataGen.flow(X_train, y_train,
batch_size= batchSizeVal),
steps_per_epoch = X_train.shape[0]//batchSizeVal,
epochs =EPOCHS,
validation_data = (X_validation,y_validation),
shuffle= True)
# Evaluate on the untouched MNIST test split.
score = model.evaluate(X_test,y_test,verbose=0)
print("Test Score = ",score[0])
print("Test Accuracy = ",score[1])
# Pickling succeeds now because make_keras_picklable() patched Model with
# __getstate__/__setstate__; the with-block closes the file even on error.
with open('model.pkl', 'wb') as f:
pickle.dump(model, f)
输出:
Epoch 1/2
960/960 [==============================] - 338s 352ms/step - loss: 1.0066 - accuracy: 0.6827 - val_loss: 0.1417 - val_accuracy: 0.9536
Epoch 2/2
960/960 [==============================] - 338s 352ms/step - loss: 0.3542 - accuracy: 0.8905 - val_loss: 0.0935 - val_accuracy: 0.9719
Test Score = 0.07476004958152771
Test Accuracy = 0.9761999845504761
这篇关于尝试腌制 ML 模型无法在 google colab 中腌制 _thread.RLock 对象的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!