feat: add option to build linear model

parent ad3eb16333
commit 91758cfffa
@@ -19,6 +19,9 @@ from tensorflow.keras.models import Model
 from tensorflow.keras.preprocessing.image import load_img, img_to_array
 from tensorflow.python.client import device_lib

+MODEL_CATEGORICAL = "categorical"
+MODEL_LINEAR = "linear"
+

 def linear_bin(a: float, N: int = 15, offset: int = 1, R: float = 2.0):
     """
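For context, the diff only shows the signature of linear_bin. Below is a minimal sketch of what a binning helper with this signature typically does in DonkeyCar-style trainers, i.e. mapping a steering angle in [-1, 1] onto a one-hot vector of N bins; the body is an assumption for illustration, not taken from this commit:

import numpy as np

def linear_bin_sketch(a: float, N: int = 15, offset: int = 1, R: float = 2.0):
    # Shift the angle from [-1, 1] into [0, R], then pick one of N bins.
    a = a + offset
    b = int(round(a / (R / (N - 1))))
    b = min(max(b, 0), N - 1)   # clamp to a valid bin index
    arr = np.zeros(N)
    arr[b] = 1.0                # one-hot encoding of the chosen bin
    return arr

# Example: a centred steering angle of 0.0 falls into the middle bin (index 7).
assert linear_bin_sketch(0.0).argmax() == 7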
@@ -57,7 +60,7 @@ def unzip_file(root, f):
     zip_ref.close()


-def train(batch_size: int, slide_size: int, img_height: int, img_width: int, img_depth: int, horizon: int, drop: float):
+def train(model_type: str, batch_size: int, slide_size: int, img_height: int, img_width: int, img_depth: int, horizon: int, drop: float):
     # env = cs.TrainingEnvironment()

     print(device_lib.list_local_devices())
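With the new leading parameter, a direct call to train() now selects the output head explicitly. A hypothetical invocation is shown below; the numeric values are placeholders for illustration only, not defaults taken from the commit:

train(
    model_type=MODEL_LINEAR,   # or MODEL_CATEGORICAL (the CLI default)
    batch_size=32,
    slide_size=0,
    img_height=120,
    img_width=160,
    img_depth=3,
    horizon=0,
    drop=0.2,
)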
@@ -108,7 +111,13 @@ def train(batch_size: int, slide_size: int, img_height: int, img_width: int, img
     # imgs = np.reshape(images[0:25], (-1, img_height, img_width, img_depth))
     # tf.summary.image("25 training data examples", imgs, max_outputs=25, step=0)

-    save_best = callbacks.ModelCheckpoint('/opt/ml/model/model_cat', monitor='val_loss', verbose=1,
+    model_filepath = '/opt/ml/model/model_other'
+    if model_type == MODEL_CATEGORICAL:
+        model_filepath = '/opt/ml/model/model_cat'
+    elif model_type == MODEL_LINEAR:
+        model_filepath = '/opt/ml/model/model_lin'
+
+    save_best = callbacks.ModelCheckpoint(model_filepath, monitor='val_loss', verbose=1,
                                           save_best_only=True, mode='min')
     early_stop = callbacks.EarlyStopping(monitor='val_loss',
                                          min_delta=.0005,
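The checkpoint path now depends on model_type: model_cat for categorical, model_lin for linear, and model_other as the fallback. The same dispatch could be written as a lookup table; this is a hypothetical refactor for clarity, not part of the commit:

# Hypothetical alternative to the if/elif chain above.
MODEL_PATHS = {
    MODEL_CATEGORICAL: '/opt/ml/model/model_cat',
    MODEL_LINEAR: '/opt/ml/model/model_lin',
}
model_filepath = MODEL_PATHS.get(model_type, '/opt/ml/model/model_other')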
@@ -121,8 +130,8 @@ def train(batch_size: int, slide_size: int, img_height: int, img_width: int, img

     angle_cat_array = np.array([linear_bin(float(a)) for a in angle_array])

-    model = default_model(input_shape=(img_height - horizon, img_width, img_depth), drop=drop)
-    #model = default_categorical(input_shape=(img_height - horizon, img_width, img_depth), drop=drop)
+    #model = default_model(input_shape=(img_height - horizon, img_width, img_depth), drop=drop)
+    model = default_categorical(input_shape=(img_height - horizon, img_width, img_depth), drop=drop)

     model.compile(optimizer='adam',
                   loss={'angle_out': 'categorical_crossentropy', },
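Note that this hunk still builds default_categorical unconditionally and compiles against the binned angles with categorical_crossentropy. If the linear head were wired in at this point as well, the compile step would presumably switch to a regression loss against the raw angles. A short sketch under that assumption, not taken from this commit:

# Assumed wiring for the linear head; not part of this commit.
if model_type == MODEL_LINEAR:
    model = default_linear(input_shape=(img_height - horizon, img_width, img_depth), drop=drop)
    model.compile(optimizer='adam',
                  loss={'angle_out': 'mse'})   # regress the raw angle
    # ...and fit against angle_array rather than angle_cat_array.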
@@ -150,7 +159,7 @@ def train(batch_size: int, slide_size: int, img_height: int, img_width: int, img
     tflite_model = converter.convert()

     # Save the model.
-    with open('/opt/ml/model/model_' + str(img_width) + 'x' + str(img_height) + 'h' + str(horizon) + '.tflite',
+    with open('/opt/ml/model/model_' + model_type + '_' + str(img_width) + 'x' + str(img_height) + 'h' + str(horizon) + '.tflite',
               'wb') as f:
         f.write(tflite_model)

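The exported TFLite filename now embeds the model type. With the illustrative values below (not taken from the commit), the concatenation shown in the diff produces /opt/ml/model/model_linear_160x120h0.tflite:

# Illustrative values only.
model_type, img_width, img_height, horizon = "linear", 160, 120, 0
path = '/opt/ml/model/model_' + model_type + '_' + str(img_width) + 'x' + str(img_height) + 'h' + str(horizon) + '.tflite'
print(path)  # /opt/ml/model/model_linear_160x120h0.tflite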
@@ -177,6 +186,8 @@ def core_cnn_layers(img_in: Input, img_height: int, img_width: int, drop: float,
     """
     Returns the core CNN layers that are shared among the different models,
     like linear, imu, behavioural
+    :param img_width: image width
+    :param img_height: image height
     :param img_in: input layer of network
     :param drop: dropout rate
     :param l4_stride: 4-th layer stride, default 1
@@ -236,20 +247,16 @@ def default_model(input_shape, drop):
     return model


-def default_n_linear(num_outputs, input_shape=(120, 160, 3), drop=0.2):
+def default_linear(input_shape=(120, 160, 3), drop=0.2):
     img_in = Input(shape=input_shape, name='img_in')
     x = core_cnn_layers(img_in, img_width=input_shape[1], img_height=input_shape[0], drop=drop)
     x = Dense(100, activation='relu', name='dense_1')(x)
     x = Dropout(drop)(x)
     x = Dense(50, activation='relu', name='dense_2')(x)
     x = Dropout(drop)(x)
+    angle_out = Dense(1, activation='linear', name='angle_out')(x)

-    outputs = []
-    for i in range(num_outputs):
-        outputs.append(
-            Dense(1, activation='linear', name='n_outputs' + str(i))(x))
-
-    model = Model(inputs=[img_in], outputs=outputs, name='linear')
+    model = Model(inputs=[img_in], outputs=[angle_out], name='linear')
     return model


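The reworked builder drops the num_outputs loop and always produces a single regression head named angle_out. A quick sanity check of the new function is sketched below; it assumes the module's existing imports are available:

# Sketch: instantiate the new single-output linear model and inspect its head.
m = default_linear(input_shape=(120, 160, 3), drop=0.2)
m.summary()
print(m.output_shape)   # expected: (None, 1) -- one linear steering value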
@@ -263,8 +270,7 @@ def default_categorical(input_shape=(120, 160, 3), drop=0.2):
     # Categorical output of the angle into 15 bins
     angle_out = Dense(15, activation='softmax', name='angle_out')(x)

-    model = Model(inputs=[img_in], outputs=[angle_out],
-                  name='categorical')
+    model = Model(inputs=[img_in], outputs=[angle_out], name='categorical')
     return model


@@ -278,10 +284,12 @@ if __name__ == "__main__":
     parser.add_argument("--horizon", type=int, default=0)
     parser.add_argument("--batch_size", type=int, default=32)
     parser.add_argument("--drop", type=float, default=0.2)
+    parser.add_argument("--model_type", type=str, default=MODEL_CATEGORICAL)

     args = parser.parse_args()
     params = vars(args)
     train(
+        model_type=params["model_type"],
         batch_size=params["batch_size"],
         slide_size=params["slide_size"],
         img_height=params["img_height"],
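With the new --model_type flag (defaulting to MODEL_CATEGORICAL, i.e. "categorical"), the linear variant can be requested from the command line. The script name below is an assumption; only flags visible in this diff are used:

# Train the categorical model (default) vs. the new linear model:
# python train.py --batch_size 32 --drop 0.2
# python train.py --model_type linear --batch_size 32 --drop 0.2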