Slips
Stratosphere Linux IPS
Loading...
Searching...
No Matches
rnn_model_training Namespace Reference

Variables

 parser = argparse.ArgumentParser()
 
 help
 
 type
 
 required
 
 default
 
 args = parser.parse_args()
 
f = lambda x: x[: args.max_letters]
 
 df
 
 axis
 
 how
 
 inplace
 
 columns
 
 indexNames = df[df['state'].str.len() < args.min_letters].index
 
 vocabulary = list('abcdefghiABCDEFGHIrstuvwxyzRSTUVWXYZ1234567890,.+*')
 
dict int_of_letters = {}
 
 vocabulary_size = len(int_of_letters)
 
int features_per_sample = 1
 
 x_data = df['state'].to_numpy()
 
 y_data = df['label'].to_numpy()
 
 max_length_of_outtupple = max([len(sublist) for sublist in df.state.to_list()])
 
 padded_x_data
 
 train_x_data = padded_x_data
 
 train_y_data = y_data
 
 num_outtuples = train_x_data.shape[0]
 
 timesteps = max_length_of_outtupple
 
tuple input_shape = (timesteps, features_per_sample)
 
 model = tf.keras.models.Sequential()
 
 loss = history.history['loss']
 
 optimizer
 
 metrics
 
 history
 
 model_file
 
 overwrite
 
 acc = history.history['accuracy']
 
 val_acc = history.history['val_accuracy']
 
 val_loss = history.history['val_loss']
 
 epochs = range(1, len(acc) + 1)
 
 label
 

Variable Documentation

◆ acc

rnn_model_training.acc = history.history['accuracy']

◆ args

rnn_model_training.args = parser.parse_args()

◆ axis

rnn_model_training.axis

◆ columns

rnn_model_training.columns

◆ default

rnn_model_training.default

◆ df

rnn_model_training.df
Initial value:
1= pd.read_csv(
2 csvfile,
3 delimiter='|',
4 names=['note', 'label', 'model_id', 'state'],
5 skipinitialspace=True,
6 converters={'state': f},
7 )

◆ epochs

rnn_model_training.epochs = range(1, len(acc) + 1)

◆ f

rnn_model_training.f = lambda x: x[: args.max_letters]

◆ features_per_sample

int rnn_model_training.features_per_sample = 1

◆ help

rnn_model_training.help

◆ history

rnn_model_training.history
Initial value:
1= model.fit(
2 train_x_data,
3 train_y_data,
4 epochs=args.epochs,
5 batch_size=args.batch_size,
6 validation_split=0.1,
7 verbose=1,
8 shuffle=True,
9)

◆ how

rnn_model_training.how

◆ indexNames

rnn_model_training.indexNames = df[df['state'].str.len() < args.min_letters].index

◆ inplace

rnn_model_training.inplace

◆ input_shape

tuple rnn_model_training.input_shape = (timesteps, features_per_sample)

◆ int_of_letters

dict rnn_model_training.int_of_letters = {}

◆ label

rnn_model_training.label

◆ loss

rnn_model_training.loss = history.history['loss']

◆ max_length_of_outtupple

rnn_model_training.max_length_of_outtupple = max([len(sublist) for sublist in df.state.to_list()])

◆ metrics

rnn_model_training.metrics

◆ model

rnn_model_training.model = tf.keras.models.Sequential()

◆ model_file

rnn_model_training.model_file

◆ num_outtuples

rnn_model_training.num_outtuples = train_x_data.shape[0]

◆ optimizer

rnn_model_training.optimizer

◆ overwrite

rnn_model_training.overwrite

◆ padded_x_data

rnn_model_training.padded_x_data
Initial value:
1= pad_sequences(
2 x_data, maxlen=max_length_of_outtupple, padding='post'
3)

◆ parser

rnn_model_training.parser = argparse.ArgumentParser()

◆ required

rnn_model_training.required

◆ timesteps

rnn_model_training.timesteps = max_length_of_outtupple

◆ train_x_data

rnn_model_training.train_x_data = padded_x_data

◆ train_y_data

rnn_model_training.train_y_data = y_data

◆ type

rnn_model_training.type

◆ val_acc

rnn_model_training.val_acc = history.history['val_accuracy']

◆ val_loss

rnn_model_training.val_loss = history.history['val_loss']

◆ vocabulary

rnn_model_training.vocabulary = list('abcdefghiABCDEFGHIrstuvwxyzRSTUVWXYZ1234567890,.+*')

◆ vocabulary_size

rnn_model_training.vocabulary_size = len(int_of_letters)

◆ x_data

rnn_model_training.x_data = df['state'].to_numpy()

◆ y_data

rnn_model_training.y_data = df['label'].to_numpy()