TypeError: fit_generator() got an unexpected keyword argument 'nb_val_samples'

Problem Description


I am trying to build a handwriting classifier by referencing this article: https://github.com/priya-dwivedi/Deep-Learning/blob/master/handwriting_recognition/English_Writer_Identification.ipynb. While fitting the model I get an error saying that fit_generator() does not expect any such argument. Also, although the message itself complains about an unexpected argument, it is raised as a TypeError, so I wonder if something is wrong with my pipeline.

Here is the model. (I am excluding all code after the error, as it shouldn't be relevant anyway; you can refer to the code in the link above if you feel it's important.)

TensorFlow version: 1.14, Keras version: 2.2.4

from __future__ import division
import numpy as np
import os
import glob
from PIL import Image  
from random import sample
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import matplotlib.image as mpimg 
%matplotlib inline

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Lambda, ELU, Activation, BatchNormalization
from tensorflow.keras.layers import Convolution2D, Cropping2D, ZeroPadding2D, MaxPooling2D 
from tensorflow.keras.optimizers import SGD, Adam, RMSprop
import tensorflow
import tensorflow.keras

# Create sentence writer mapping
# Dictionary with form and writer mapping
d = {}
with open('forms_for_parsing.txt') as f:
    for line in f:
        key = line.split(' ')[0]
        writer = line.split(' ')[1]
        d[key] = writer

tmp = []
target_list = []
path_to_files = os.path.join('datab', '*')
for filename in sorted(glob.glob(path_to_files)):
    tmp.append(filename)
    image_name = filename.split(os.sep)[1]
    file, ext = os.path.splitext(image_name)
    parts = file.split('-')
    form = parts[0] + '-' + parts[1]
    for key in d:
        if key == form:
            target_list.append(str(d[form]))

img_files = np.asarray(tmp)
img_targets = np.asarray(target_list)

# Visualizing the data
for filename in img_files[:3]:
    img=mpimg.imread(filename)
    plt.figure(figsize=(10,10))
    plt.imshow(img, cmap ='gray')

# Label Encode writer names for one hot encoding later
encoder = LabelEncoder()
encoder.fit(img_targets)
encoded_Y = encoder.transform(img_targets)

print(img_files[:5], img_targets[:5], encoded_Y[:5])

#split into test train and validation in ratio 4:1:1

from sklearn.model_selection import train_test_split 
train_files, rem_files, train_targets, rem_targets = train_test_split(
        img_files, encoded_Y, train_size=0.66, random_state=52, shuffle= True)

validation_files, test_files, validation_targets, test_targets = train_test_split(
        rem_files, rem_targets, train_size=0.5, random_state=22, shuffle=True)

print(train_files.shape, validation_files.shape, test_files.shape)
print(train_targets.shape, validation_targets.shape, test_targets.shape)

# Generator function for generating random crops from each sentence

# Now create generators for randomly cropping 113x113 patches from these images

batch_size = 16 
num_classes = 50

# Start with train generator shared in the class and add image augmentations
def generate_data(samples, target_files, batch_size=batch_size, factor=0.1):
    num_samples = len(samples)
    from sklearn.utils import shuffle
    while 1: # Loop forever so the generator never terminates
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            batch_targets = target_files[offset:offset+batch_size]

            images = []
            targets = []
            for i in range(len(batch_samples)):
                batch_sample = batch_samples[i]
                batch_target = batch_targets[i]
                im = Image.open(batch_sample)
                cur_width = im.size[0]
                cur_height = im.size[1]

                # print(cur_width, cur_height)
                height_fac = 113 / cur_height

                new_width = int(cur_width * height_fac)
                size = new_width, 113

                imresize = im.resize(size, Image.ANTIALIAS)  # Resize so height = 113 while keeping aspect ratio
                now_width = imresize.size[0]
                now_height = imresize.size[1]
                # Generate crops of size 113x113 from this resized image and keep random 10% of crops

                avail_x_points = list(range(0, now_width - 113))  # possible x start points run from 0 to width - 113

                # Pick random x%
                pick_num = int(len(avail_x_points)*factor)

                # Now pick
                random_startx = sample(avail_x_points, pick_num)

                for start in random_startx:
                    imcrop = imresize.crop((start, 0, start+113, 113))
                    images.append(np.asarray(imcrop))
                    targets.append(batch_target)

            # Stack all crops from this batch into arrays
            X_train = np.array(images)
            y_train = np.array(targets)

            #reshape X_train for feeding in later
            X_train = X_train.reshape(X_train.shape[0], 113, 113, 1)  # add the single grayscale channel

            X_train = X_train.astype('float32')  
            X_train /= 255

            #One hot encode y
            y_train = to_categorical(y_train, num_classes) 

            yield shuffle(X_train, y_train)  # shuffle the crops within this batch

train_generator = generate_data(train_files, train_targets, batch_size=batch_size, factor=0.3)
validation_generator = generate_data(validation_files, validation_targets, batch_size=batch_size, factor=0.3)
test_generator = generate_data(test_files, test_targets, batch_size=batch_size, factor=0.1)

history_object = model.fit_generator(train_generator, steps_per_epoch= samples_per_epoch1,
                                     validation_data=validation_generator,
                                     nb_val_samples=nb_val_samples, nb_epoch=nb_epoch, verbose=1, callbacks=callbacks_list)

The error log is as follows:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-34-54937a660f6c> in <module>
      1 history_object = model.fit_generator(train_generator, steps_per_epoch= samples_per_epoch1,
      2                                      validation_data=validation_generator,
----> 3                                      nb_val_samples=nb_val_samples, nb_epoch=nb_epoch, verbose=1, callbacks=callbacks_list)

TypeError: fit_generator() got an unexpected keyword argument 'nb_val_samples'

Answer

After Keras 2.0, the nb_val_samples keyword was renamed to validation_steps. I also see the nb_epoch keyword in your code; it was renamed to epochs. Note that validation_steps counts batches drawn from the validation generator, not individual samples.
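As a minimal sketch of the corrected call (keeping the names from your question such as samples_per_epoch1 and callbacks_list; the epochs value of 10 and the validation_steps computation are placeholder assumptions):

history_object = model.fit_generator(train_generator,
                                     steps_per_epoch=samples_per_epoch1,  # batches of training data per epoch
                                     validation_data=validation_generator,
                                     # validation_steps replaces nb_val_samples and counts batches,
                                     # so one common choice is number of samples // batch_size
                                     validation_steps=len(validation_files) // batch_size,
                                     epochs=10,  # replaces nb_epoch; placeholder value
                                     verbose=1, callbacks=callbacks_list)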

If you don't want to change the keywords, simply downgrade Keras to a version below 2.0.
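For example (the pin below is an assumption; check PyPI for the exact 1.x release you want):

pip install 'keras<2.0'

Keep in mind, though, that the code above imports tensorflow.keras, whose version is tied to the installed TensorFlow rather than the standalone keras package, so renaming the keywords as shown above is the more direct fix here.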
