Compare commits
8 Commits
feat/linea
...
master
Author | SHA1 | Date | |
---|---|---|---|
415d39c5bb | |||
013a5135ae | |||
7d7d2af622 | |||
f6899fa277 | |||
44bbe77a5b | |||
d7756a7184 | |||
47653d723c | |||
2cbff4fdc8 |
35
Dockerfile
35
Dockerfile
@ -1,15 +1,26 @@
|
||||
FROM docker.io/tensorflow/tensorflow:2.6.0
|
||||
FROM docker.io/library/python:3.10-slim as base
|
||||
|
||||
COPY requirements.txt .
|
||||
RUN pip3 install --upgrade pip==20.0.2 && pip3 list && pip3 install -r requirements.txt \
|
||||
&& pip3 list
|
||||
|
||||
WORKDIR /root
|
||||
|
||||
# copy the training script inside the container
|
||||
COPY src/tf_container/train.py /opt/ml/code/train.py
|
||||
|
||||
# define train.py as the script entry point
|
||||
ENV SAGEMAKER_PROGRAM train.py
|
||||
# Configure piwheels repo to use pre-compiled numpy wheels for arm
|
||||
RUN echo -n "[global]\nextra-index-url=https://www.piwheels.org/simple\n" >> /etc/pip.conf
|
||||
|
||||
|
||||
#################
|
||||
FROM base as builder
|
||||
|
||||
RUN apt-get update && apt-get install -y git && \
|
||||
pip3 install poetry==1.2.0 && \
|
||||
poetry self add "poetry-dynamic-versioning[plugin]"
|
||||
ADD . .
|
||||
|
||||
RUN poetry build
|
||||
|
||||
#################
|
||||
FROM base
|
||||
|
||||
COPY --from=builder dist/*.whl /tmp/
|
||||
RUN pip3 install /tmp/*whl
|
||||
|
||||
WORKDIR /tmp
|
||||
USER 1234
|
||||
|
||||
ENTRYPOINT ["/usr/local/bin/train"]
|
||||
|
@ -1,15 +0,0 @@
|
||||
FROM docker.io/tensorflow/tensorflow:2.6.0-gpu
|
||||
|
||||
COPY requirements.txt .
|
||||
RUN pip3 install --upgrade pip==20.0.2 && pip3 list && pip3 install -r requirements.txt \
|
||||
&& pip3 list
|
||||
|
||||
WORKDIR /root
|
||||
|
||||
# copy the training script inside the container
|
||||
COPY src/tf_container/train.py /opt/ml/code/train.py
|
||||
|
||||
# define train.py as the script entry point
|
||||
ENV SAGEMAKER_PROGRAM train.py
|
||||
|
||||
|
@ -1 +0,0 @@
|
||||
docker build -t tensorflow:1.8.0-gpu-py3 -f Dockerfile.gpu .
|
@ -1,24 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
job_name=$1
|
||||
if [[ -z ${job_name} ]]
|
||||
then
|
||||
echo 'Provide model name'
|
||||
exit 0
|
||||
fi
|
||||
echo 'Creating training job '$1
|
||||
|
||||
training_image="117617958416.dkr.ecr.eu-west-1.amazonaws.com/robocars:tensorflow"
|
||||
iam_role_arn="arn:aws:iam::117617958416:role/robocar-training"
|
||||
DATA_BUCKET="s3://robocars-cyrilix-learning/input"
|
||||
DATA_OUTPUT="s3://robocars-cyrilix-learning/output"
|
||||
|
||||
aws sagemaker create-training-job \
|
||||
--training-job-name ${job_name} \
|
||||
--hyper-parameters '{ "sagemaker_region": "\"eu-west-1\"", "with_slide": "true", "img_height": "120", "img_width": "160" }' \
|
||||
--algorithm-specification TrainingImage="${training_image}",TrainingInputMode=File \
|
||||
--role-arn ${iam_role_arn} \
|
||||
--input-data-config "[{ \"ChannelName\": \"train\", \"DataSource\": { \"S3DataSource\": { \"S3DataType\": \"S3Prefix\", \"S3Uri\": \"${DATA_BUCKET}\", \"S3DataDistributionType\": \"FullyReplicated\" }} }]" \
|
||||
--output-data-config S3OutputPath=${DATA_OUTPUT} \
|
||||
--resource-config InstanceType=ml.p2.xlarge,InstanceCount=1,VolumeSizeInGB=1 \
|
||||
--stopping-condition MaxRuntimeInSeconds=1800
|
3
mypy.ini
Normal file
3
mypy.ini
Normal file
@ -0,0 +1,3 @@
|
||||
[mypy]
|
||||
strict = true
|
||||
plugins = numpy.typing.mypy_plugin
|
1250
poetry.lock
generated
Normal file
1250
poetry.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
27
pyproject.toml
Normal file
27
pyproject.toml
Normal file
@ -0,0 +1,27 @@
|
||||
[tool.poetry]
|
||||
name = "robocar-training"
|
||||
version = "0.0.0"
|
||||
description = "DIY Robocars model training"
|
||||
authors = ["Cyrille Nofficial <cynoffic@cyrilix.fr>"]
|
||||
license = "Apache2"
|
||||
readme = "README.md"
|
||||
packages = [{include = "tf_container"}, {include="tensorflow-stubs"}]
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.10,<3.11"
|
||||
tensorflow = "^2.10.0"
|
||||
numpy = "^1.23.4"
|
||||
pillow = "^8.3.2"
|
||||
flatbuffers = "<2.11"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
mypy = "^0.982"
|
||||
pylint = "^2.15.5"
|
||||
tensor-annotations = "^2.0.2"
|
||||
|
||||
[build-system]
|
||||
requires = ["poetry-core>=1.0.0", "poetry-dynamic-versioning"]
|
||||
build-backend = "poetry_dynamic_versioning.backend"
|
||||
|
||||
[tool.poetry.scripts]
|
||||
train = 'tf_container.train:main'
|
@ -1,4 +0,0 @@
|
||||
sagemaker-training==3.9.2
|
||||
tensorflow==2.6.0
|
||||
numpy==1.19.5
|
||||
pillow==8.3.2
|
31
setup.py
31
setup.py
@ -1,31 +0,0 @@
|
||||
import os
|
||||
from os.path import basename
|
||||
from os.path import splitext
|
||||
|
||||
from glob import glob
|
||||
from setuptools import setup, find_packages
|
||||
|
||||
|
||||
def read(fname):
|
||||
return open(os.path.join(os.path.dirname(__file__), fname)).read()
|
||||
|
||||
|
||||
setup(
|
||||
name='robocars_sagemaker_container',
|
||||
version='1.0.0',
|
||||
|
||||
packages=find_packages(where='src', exclude=('test',)),
|
||||
package_dir={'': 'src'},
|
||||
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
|
||||
|
||||
classifiers=[
|
||||
'Programming Language :: Python :: 3.7',
|
||||
],
|
||||
entry_points={
|
||||
'console_scripts': [
|
||||
'train=tf_container.train_entry_point:train',
|
||||
]
|
||||
},
|
||||
install_requires=['sagemaker-container-support'],
|
||||
extras_require={},
|
||||
)
|
@ -1,16 +0,0 @@
|
||||
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License").
|
||||
# You may not use this file except in compliance with the License.
|
||||
# A copy of the License is located at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# or in the "license" file accompanying this file. This file is distributed
|
||||
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
|
||||
# express or implied. See the License for the specific language governing
|
||||
# permissions and limitations under the License.
|
||||
|
||||
from tf_container.train_entry_point import train
|
||||
|
||||
__all__ = ['train']
|
0
tensorflow-stubs/__init__.py
Normal file
0
tensorflow-stubs/__init__.py
Normal file
5085
tensorflow-stubs/__init__.pyi
Executable file
5085
tensorflow-stubs/__init__.pyi
Executable file
File diff suppressed because it is too large
Load Diff
3
tensorflow-stubs/core/util/__init__.pyi
Normal file
3
tensorflow-stubs/core/util/__init__.pyi
Normal file
@ -0,0 +1,3 @@
|
||||
from typing import Any
|
||||
|
||||
event_pb2: Any
|
4
tensorflow-stubs/keras/__init__.pyi
Normal file
4
tensorflow-stubs/keras/__init__.pyi
Normal file
@ -0,0 +1,4 @@
|
||||
from keras.api._v2.keras import __internal__ as __internal__, activations as activations, applications as applications, backend as backend, callbacks as callbacks, constraints as constraints, datasets as datasets, dtensor as dtensor, estimator as estimator, experimental as experimental, initializers as initializers, layers as layers, losses as losses, metrics as metrics, mixed_precision as mixed_precision, models as models, optimizers as optimizers, preprocessing as preprocessing, regularizers as regularizers, utils as utils, wrappers as wrappers
|
||||
from keras.engine.input_layer import Input as Input
|
||||
from keras.engine.sequential import Sequential as Sequential
|
||||
from keras.engine.training import Model as Model
|
1
tensorflow-stubs/keras/__internal__/__init__.pyi
Normal file
1
tensorflow-stubs/keras/__internal__/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.api._v2.keras.__internal__ import backend as backend, layers as layers, losses as losses, models as models, utils as utils
|
1
tensorflow-stubs/keras/__internal__/backend/__init__.pyi
Normal file
1
tensorflow-stubs/keras/__internal__/backend/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.backend import track_variable as track_variable
|
2
tensorflow-stubs/keras/__internal__/layers/__init__.pyi
Normal file
2
tensorflow-stubs/keras/__internal__/layers/__init__.pyi
Normal file
@ -0,0 +1,2 @@
|
||||
from keras.engine.base_layer import BaseRandomLayer as BaseRandomLayer
|
||||
from keras.layers.preprocessing.image_preprocessing import BaseImageAugmentationLayer as BaseImageAugmentationLayer
|
1
tensorflow-stubs/keras/__internal__/losses/__init__.pyi
Normal file
1
tensorflow-stubs/keras/__internal__/losses/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.utils.losses_utils import compute_weighted_loss as compute_weighted_loss
|
1
tensorflow-stubs/keras/__internal__/models/__init__.pyi
Normal file
1
tensorflow-stubs/keras/__internal__/models/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.models.cloning import clone_and_build_model as clone_and_build_model, in_place_subclassed_model_state_restoration as in_place_subclassed_model_state_restoration
|
3
tensorflow-stubs/keras/__internal__/utils/__init__.pyi
Normal file
3
tensorflow-stubs/keras/__internal__/utils/__init__.pyi
Normal file
@ -0,0 +1,3 @@
|
||||
from keras.engine.data_adapter import get_data_handler as get_data_handler
|
||||
from keras.testing_infra.test_utils import layer_test as layer_test
|
||||
from keras.utils.tf_utils import register_symbolic_tensor_type as register_symbolic_tensor_type
|
1
tensorflow-stubs/keras/activations/__init__.pyi
Normal file
1
tensorflow-stubs/keras/activations/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.activations import deserialize as deserialize, elu as elu, exponential as exponential, gelu as gelu, get as get, hard_sigmoid as hard_sigmoid, linear as linear, relu as relu, selu as selu, serialize as serialize, sigmoid as sigmoid, softmax as softmax, softplus as softplus, softsign as softsign, swish as swish, tanh as tanh
|
18
tensorflow-stubs/keras/applications/__init__.pyi
Normal file
18
tensorflow-stubs/keras/applications/__init__.pyi
Normal file
@ -0,0 +1,18 @@
|
||||
from keras.api._v2.keras.applications import convnext as convnext, densenet as densenet, efficientnet as efficientnet, efficientnet_v2 as efficientnet_v2, imagenet_utils as imagenet_utils, inception_resnet_v2 as inception_resnet_v2, inception_v3 as inception_v3, mobilenet as mobilenet, mobilenet_v2 as mobilenet_v2, mobilenet_v3 as mobilenet_v3, nasnet as nasnet, regnet as regnet, resnet as resnet, resnet50 as resnet50, resnet_rs as resnet_rs, resnet_v2 as resnet_v2, vgg16 as vgg16, vgg19 as vgg19, xception as xception
|
||||
from keras.applications.convnext import ConvNeXtBase as ConvNeXtBase, ConvNeXtLarge as ConvNeXtLarge, ConvNeXtSmall as ConvNeXtSmall, ConvNeXtTiny as ConvNeXtTiny, ConvNeXtXLarge as ConvNeXtXLarge
|
||||
from keras.applications.densenet import DenseNet121 as DenseNet121, DenseNet169 as DenseNet169, DenseNet201 as DenseNet201
|
||||
from keras.applications.efficientnet import EfficientNetB0 as EfficientNetB0, EfficientNetB1 as EfficientNetB1, EfficientNetB2 as EfficientNetB2, EfficientNetB3 as EfficientNetB3, EfficientNetB4 as EfficientNetB4, EfficientNetB5 as EfficientNetB5, EfficientNetB6 as EfficientNetB6, EfficientNetB7 as EfficientNetB7
|
||||
from keras.applications.efficientnet_v2 import EfficientNetV2B0 as EfficientNetV2B0, EfficientNetV2B1 as EfficientNetV2B1, EfficientNetV2B2 as EfficientNetV2B2, EfficientNetV2B3 as EfficientNetV2B3, EfficientNetV2L as EfficientNetV2L, EfficientNetV2M as EfficientNetV2M, EfficientNetV2S as EfficientNetV2S
|
||||
from keras.applications.inception_resnet_v2 import InceptionResNetV2 as InceptionResNetV2
|
||||
from keras.applications.inception_v3 import InceptionV3 as InceptionV3
|
||||
from keras.applications.mobilenet import MobileNet as MobileNet
|
||||
from keras.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2
|
||||
from keras.applications.mobilenet_v3 import MobileNetV3Large as MobileNetV3Large, MobileNetV3Small as MobileNetV3Small
|
||||
from keras.applications.nasnet import NASNetLarge as NASNetLarge, NASNetMobile as NASNetMobile
|
||||
from keras.applications.regnet import RegNetX002 as RegNetX002, RegNetX004 as RegNetX004, RegNetX006 as RegNetX006, RegNetX008 as RegNetX008, RegNetX016 as RegNetX016, RegNetX032 as RegNetX032, RegNetX040 as RegNetX040, RegNetX064 as RegNetX064, RegNetX080 as RegNetX080, RegNetX120 as RegNetX120, RegNetX160 as RegNetX160, RegNetX320 as RegNetX320, RegNetY002 as RegNetY002, RegNetY004 as RegNetY004, RegNetY006 as RegNetY006, RegNetY008 as RegNetY008, RegNetY016 as RegNetY016, RegNetY032 as RegNetY032, RegNetY040 as RegNetY040, RegNetY064 as RegNetY064, RegNetY080 as RegNetY080, RegNetY120 as RegNetY120, RegNetY160 as RegNetY160, RegNetY320 as RegNetY320
|
||||
from keras.applications.resnet import ResNet101 as ResNet101, ResNet152 as ResNet152, ResNet50 as ResNet50
|
||||
from keras.applications.resnet_rs import ResNetRS101 as ResNetRS101, ResNetRS152 as ResNetRS152, ResNetRS200 as ResNetRS200, ResNetRS270 as ResNetRS270, ResNetRS350 as ResNetRS350, ResNetRS420 as ResNetRS420, ResNetRS50 as ResNetRS50
|
||||
from keras.applications.resnet_v2 import ResNet101V2 as ResNet101V2, ResNet152V2 as ResNet152V2, ResNet50V2 as ResNet50V2
|
||||
from keras.applications.vgg16 import VGG16 as VGG16
|
||||
from keras.applications.vgg19 import VGG19 as VGG19
|
||||
from keras.applications.xception import Xception as Xception
|
@ -0,0 +1 @@
|
||||
from keras.applications.convnext import ConvNeXtBase as ConvNeXtBase, ConvNeXtLarge as ConvNeXtLarge, ConvNeXtSmall as ConvNeXtSmall, ConvNeXtTiny as ConvNeXtTiny, ConvNeXtXLarge as ConvNeXtXLarge, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.densenet import DenseNet121 as DenseNet121, DenseNet169 as DenseNet169, DenseNet201 as DenseNet201, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.efficientnet import EfficientNetB0 as EfficientNetB0, EfficientNetB1 as EfficientNetB1, EfficientNetB2 as EfficientNetB2, EfficientNetB3 as EfficientNetB3, EfficientNetB4 as EfficientNetB4, EfficientNetB5 as EfficientNetB5, EfficientNetB6 as EfficientNetB6, EfficientNetB7 as EfficientNetB7, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.efficientnet_v2 import EfficientNetV2B0 as EfficientNetV2B0, EfficientNetV2B1 as EfficientNetV2B1, EfficientNetV2B2 as EfficientNetV2B2, EfficientNetV2B3 as EfficientNetV2B3, EfficientNetV2L as EfficientNetV2L, EfficientNetV2M as EfficientNetV2M, EfficientNetV2S as EfficientNetV2S, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.imagenet_utils import decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.inception_resnet_v2 import InceptionResNetV2 as InceptionResNetV2, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.inception_v3 import InceptionV3 as InceptionV3, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.mobilenet import MobileNet as MobileNet, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.mobilenet_v3 import decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
1
tensorflow-stubs/keras/applications/nasnet/__init__.pyi
Normal file
1
tensorflow-stubs/keras/applications/nasnet/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.applications.nasnet import NASNetLarge as NASNetLarge, NASNetMobile as NASNetMobile, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
1
tensorflow-stubs/keras/applications/regnet/__init__.pyi
Normal file
1
tensorflow-stubs/keras/applications/regnet/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.applications.regnet import RegNetX002 as RegNetX002, RegNetX004 as RegNetX004, RegNetX006 as RegNetX006, RegNetX008 as RegNetX008, RegNetX016 as RegNetX016, RegNetX032 as RegNetX032, RegNetX040 as RegNetX040, RegNetX064 as RegNetX064, RegNetX080 as RegNetX080, RegNetX120 as RegNetX120, RegNetX160 as RegNetX160, RegNetX320 as RegNetX320, RegNetY002 as RegNetY002, RegNetY004 as RegNetY004, RegNetY006 as RegNetY006, RegNetY008 as RegNetY008, RegNetY016 as RegNetY016, RegNetY032 as RegNetY032, RegNetY040 as RegNetY040, RegNetY064 as RegNetY064, RegNetY080 as RegNetY080, RegNetY120 as RegNetY120, RegNetY160 as RegNetY160, RegNetY320 as RegNetY320, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
1
tensorflow-stubs/keras/applications/resnet/__init__.pyi
Normal file
1
tensorflow-stubs/keras/applications/resnet/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.applications.resnet import ResNet101 as ResNet101, ResNet152 as ResNet152, ResNet50 as ResNet50, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.resnet import ResNet50 as ResNet50, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.resnet_rs import ResNetRS101 as ResNetRS101, ResNetRS152 as ResNetRS152, ResNetRS200 as ResNetRS200, ResNetRS270 as ResNetRS270, ResNetRS350 as ResNetRS350, ResNetRS420 as ResNetRS420, ResNetRS50 as ResNetRS50, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.resnet_v2 import ResNet101V2 as ResNet101V2, ResNet152V2 as ResNet152V2, ResNet50V2 as ResNet50V2, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
1
tensorflow-stubs/keras/applications/vgg16/__init__.pyi
Normal file
1
tensorflow-stubs/keras/applications/vgg16/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.applications.vgg16 import VGG16 as VGG16, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
1
tensorflow-stubs/keras/applications/vgg19/__init__.pyi
Normal file
1
tensorflow-stubs/keras/applications/vgg19/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.applications.vgg19 import VGG19 as VGG19, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
@ -0,0 +1 @@
|
||||
from keras.applications.xception import Xception as Xception, decode_predictions as decode_predictions, preprocess_input as preprocess_input
|
3
tensorflow-stubs/keras/backend/__init__.pyi
Normal file
3
tensorflow-stubs/keras/backend/__init__.pyi
Normal file
@ -0,0 +1,3 @@
|
||||
from keras.api._v2.keras.backend import experimental as experimental
|
||||
from keras.backend import abs as abs, all as all, any as any, arange as arange, argmax as argmax, argmin as argmin, backend as backend, batch_dot as batch_dot, batch_flatten as batch_flatten, batch_get_value as batch_get_value, batch_normalization as batch_normalization, batch_set_value as batch_set_value, bias_add as bias_add, binary_crossentropy as binary_crossentropy, binary_focal_crossentropy as binary_focal_crossentropy, cast as cast, cast_to_floatx as cast_to_floatx, categorical_crossentropy as categorical_crossentropy, clear_session as clear_session, clip as clip, concatenate as concatenate, constant as constant, conv1d as conv1d, conv2d as conv2d, conv2d_transpose as conv2d_transpose, conv3d as conv3d, cos as cos, count_params as count_params, ctc_batch_cost as ctc_batch_cost, ctc_decode as ctc_decode, ctc_label_dense_to_sparse as ctc_label_dense_to_sparse, cumprod as cumprod, cumsum as cumsum, depthwise_conv2d as depthwise_conv2d, dot as dot, dropout as dropout, dtype as dtype, elu as elu, equal as equal, eval as eval, exp as exp, expand_dims as expand_dims, eye as eye, flatten as flatten, foldl as foldl, foldr as foldr, function as function, gather as gather, get_uid as get_uid, get_value as get_value, gradients as gradients, greater as greater, greater_equal as greater_equal, hard_sigmoid as hard_sigmoid, in_test_phase as in_test_phase, in_top_k as in_top_k, in_train_phase as in_train_phase, int_shape as int_shape, is_keras_tensor as is_keras_tensor, is_sparse as is_sparse, l2_normalize as l2_normalize, learning_phase as learning_phase, learning_phase_scope as learning_phase_scope, less as less, less_equal as less_equal, local_conv1d as local_conv1d, local_conv2d as local_conv2d, log as log, manual_variable_initialization as manual_variable_initialization, map_fn as map_fn, max as max, maximum as maximum, mean as mean, min as min, minimum as minimum, moving_average_update as moving_average_update, name_scope as name_scope, ndim as ndim, 
normalize_batch_in_training as normalize_batch_in_training, not_equal as not_equal, one_hot as one_hot, ones as ones, ones_like as ones_like, permute_dimensions as permute_dimensions, placeholder as placeholder, pool2d as pool2d, pool3d as pool3d, pow as pow, print_tensor as print_tensor, prod as prod, random_bernoulli as random_bernoulli, random_binomial as random_binomial, random_normal as random_normal, random_normal_variable as random_normal_variable, random_uniform as random_uniform, random_uniform_variable as random_uniform_variable, relu as relu, repeat as repeat, repeat_elements as repeat_elements, reset_uids as reset_uids, reshape as reshape, resize_images as resize_images, resize_volumes as resize_volumes, reverse as reverse, rnn as rnn, round as round, separable_conv2d as separable_conv2d, set_learning_phase as set_learning_phase, set_value as set_value, shape as shape, sigmoid as sigmoid, sign as sign, sin as sin, softmax as softmax, softplus as softplus, softsign as softsign, sparse_categorical_crossentropy as sparse_categorical_crossentropy, spatial_2d_padding as spatial_2d_padding, spatial_3d_padding as spatial_3d_padding, sqrt as sqrt, square as square, squeeze as squeeze, stack as stack, std as std, stop_gradient as stop_gradient, sum as sum, switch as switch, tanh as tanh, temporal_padding as temporal_padding, tile as tile, to_dense as to_dense, transpose as transpose, truncated_normal as truncated_normal, update as update, update_add as update_add, update_sub as update_sub, var as var, variable as variable, zeros as zeros, zeros_like as zeros_like
|
||||
from keras.backend_config import epsilon as epsilon, floatx as floatx, image_data_format as image_data_format, set_epsilon as set_epsilon, set_floatx as set_floatx, set_image_data_format as set_image_data_format
|
1
tensorflow-stubs/keras/backend/experimental/__init__.pyi
Normal file
1
tensorflow-stubs/keras/backend/experimental/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.backend import disable_tf_random_generator as disable_tf_random_generator, enable_tf_random_generator as enable_tf_random_generator, is_tf_random_generator_enabled as is_tf_random_generator_enabled
|
2
tensorflow-stubs/keras/callbacks/__init__.pyi
Normal file
2
tensorflow-stubs/keras/callbacks/__init__.pyi
Normal file
@ -0,0 +1,2 @@
|
||||
from keras.api._v2.keras.callbacks import experimental as experimental
|
||||
from keras.callbacks import BackupAndRestore as BackupAndRestore, BaseLogger as BaseLogger, CSVLogger as CSVLogger, Callback as Callback, CallbackList as CallbackList, EarlyStopping as EarlyStopping, History as History, LambdaCallback as LambdaCallback, LearningRateScheduler as LearningRateScheduler, ModelCheckpoint as ModelCheckpoint, ProgbarLogger as ProgbarLogger, ReduceLROnPlateau as ReduceLROnPlateau, RemoteMonitor as RemoteMonitor, TensorBoard as TensorBoard, TerminateOnNaN as TerminateOnNaN
|
1
tensorflow-stubs/keras/constraints/__init__.pyi
Normal file
1
tensorflow-stubs/keras/constraints/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.constraints import Constraint as Constraint, MaxNorm as MaxNorm, MinMaxNorm as MinMaxNorm, NonNeg as NonNeg, RadialConstraint as RadialConstraint, UnitNorm as UnitNorm, deserialize as deserialize, get as get, serialize as serialize
|
1
tensorflow-stubs/keras/datasets/__init__.pyi
Normal file
1
tensorflow-stubs/keras/datasets/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.api._v2.keras.datasets import boston_housing as boston_housing, cifar10 as cifar10, cifar100 as cifar100, fashion_mnist as fashion_mnist, imdb as imdb, mnist as mnist, reuters as reuters
|
@ -0,0 +1 @@
|
||||
from keras.datasets.boston_housing import load_data as load_data
|
1
tensorflow-stubs/keras/datasets/cifar10/__init__.pyi
Normal file
1
tensorflow-stubs/keras/datasets/cifar10/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.datasets.cifar10 import load_data as load_data
|
1
tensorflow-stubs/keras/datasets/cifar100/__init__.pyi
Normal file
1
tensorflow-stubs/keras/datasets/cifar100/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.datasets.cifar100 import load_data as load_data
|
@ -0,0 +1 @@
|
||||
from keras.datasets.fashion_mnist import load_data as load_data
|
1
tensorflow-stubs/keras/datasets/imdb/__init__.pyi
Normal file
1
tensorflow-stubs/keras/datasets/imdb/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.datasets.imdb import get_word_index as get_word_index, load_data as load_data
|
1
tensorflow-stubs/keras/datasets/mnist/__init__.pyi
Normal file
1
tensorflow-stubs/keras/datasets/mnist/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.datasets.mnist import load_data as load_data
|
1
tensorflow-stubs/keras/datasets/reuters/__init__.pyi
Normal file
1
tensorflow-stubs/keras/datasets/reuters/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.datasets.reuters import get_word_index as get_word_index, load_data as load_data
|
1
tensorflow-stubs/keras/dtensor/__init__.pyi
Normal file
1
tensorflow-stubs/keras/dtensor/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.api._v2.keras.dtensor import experimental as experimental
|
2
tensorflow-stubs/keras/dtensor/experimental/__init__.pyi
Normal file
2
tensorflow-stubs/keras/dtensor/experimental/__init__.pyi
Normal file
@ -0,0 +1,2 @@
|
||||
from keras.api._v2.keras.dtensor.experimental import optimizers as optimizers
|
||||
from keras.dtensor.layout_map import LayoutMap as LayoutMap, layout_map_scope as layout_map_scope
|
@ -0,0 +1 @@
|
||||
from keras.dtensor.optimizers import Adadelta as Adadelta, Adagrad as Adagrad, Adam as Adam, AdamW as AdamW, RMSprop as RMSprop, SGD as SGD
|
0
tensorflow-stubs/keras/estimator/__init__.pyi
Normal file
0
tensorflow-stubs/keras/estimator/__init__.pyi
Normal file
4
tensorflow-stubs/keras/experimental/__init__.pyi
Normal file
4
tensorflow-stubs/keras/experimental/__init__.pyi
Normal file
@ -0,0 +1,4 @@
|
||||
from keras.feature_column.sequence_feature_column import SequenceFeatures as SequenceFeatures
|
||||
from keras.optimizers.schedules.learning_rate_schedule import CosineDecay as CosineDecay, CosineDecayRestarts as CosineDecayRestarts
|
||||
from keras.premade_models.linear import LinearModel as LinearModel
|
||||
from keras.premade_models.wide_deep import WideDeepModel as WideDeepModel
|
2
tensorflow-stubs/keras/initializers/__init__.pyi
Normal file
2
tensorflow-stubs/keras/initializers/__init__.pyi
Normal file
@ -0,0 +1,2 @@
|
||||
from keras.initializers import deserialize as deserialize, get as get, serialize as serialize
|
||||
from keras.initializers.initializers_v2 import Constant as Constant, GlorotNormal as GlorotNormal, GlorotUniform as GlorotUniform, HeNormal as HeNormal, HeUniform as HeUniform, Identity as Identity, Initializer as Initializer, LecunNormal as LecunNormal, LecunUniform as LecunUniform, Ones as Ones, Orthogonal as Orthogonal, RandomNormal as RandomNormal, RandomUniform as RandomUniform, TruncatedNormal as TruncatedNormal, VarianceScaling as VarianceScaling, Zeros as Zeros
|
97
tensorflow-stubs/keras/layers/__init__.pyi
Normal file
97
tensorflow-stubs/keras/layers/__init__.pyi
Normal file
@ -0,0 +1,97 @@
|
||||
from keras.api._v2.keras.layers import experimental as experimental
|
||||
from keras.engine.base_layer import Layer as Layer
|
||||
from keras.engine.input_layer import Input as Input, InputLayer as InputLayer
|
||||
from keras.engine.input_spec import InputSpec as InputSpec
|
||||
from keras.feature_column.dense_features_v2 import DenseFeatures as DenseFeatures
|
||||
from keras.layers.activation.elu import ELU as ELU
|
||||
from keras.layers.activation.leaky_relu import LeakyReLU as LeakyReLU
|
||||
from keras.layers.activation.prelu import PReLU as PReLU
|
||||
from keras.layers.activation.relu import ReLU as ReLU
|
||||
from keras.layers.activation.softmax import Softmax as Softmax
|
||||
from keras.layers.activation.thresholded_relu import ThresholdedReLU as ThresholdedReLU
|
||||
from keras.layers.attention.additive_attention import AdditiveAttention as AdditiveAttention
|
||||
from keras.layers.attention.attention import Attention as Attention
|
||||
from keras.layers.attention.multi_head_attention import MultiHeadAttention as MultiHeadAttention
|
||||
from keras.layers.convolutional.conv1d import Conv1D as Conv1D
|
||||
from keras.layers.convolutional.conv1d_transpose import Conv1DTranspose as Conv1DTranspose
|
||||
from keras.layers.convolutional.conv2d import Conv2D as Conv2D
|
||||
from keras.layers.convolutional.conv2d_transpose import Conv2DTranspose as Conv2DTranspose
|
||||
from keras.layers.convolutional.conv3d import Conv3D as Conv3D
|
||||
from keras.layers.convolutional.conv3d_transpose import Conv3DTranspose as Conv3DTranspose
|
||||
from keras.layers.convolutional.depthwise_conv1d import DepthwiseConv1D as DepthwiseConv1D
|
||||
from keras.layers.convolutional.depthwise_conv2d import DepthwiseConv2D as DepthwiseConv2D
|
||||
from keras.layers.convolutional.separable_conv1d import SeparableConv1D as SeparableConv1D
|
||||
from keras.layers.convolutional.separable_conv2d import SeparableConv2D as SeparableConv2D
|
||||
from keras.layers.core.activation import Activation as Activation
|
||||
from keras.layers.core.dense import Dense as Dense
|
||||
from keras.layers.core.einsum_dense import EinsumDense as EinsumDense
|
||||
from keras.layers.core.embedding import Embedding as Embedding
|
||||
from keras.layers.core.lambda_layer import Lambda as Lambda
|
||||
from keras.layers.core.masking import Masking as Masking
|
||||
from keras.layers.locally_connected.locally_connected1d import LocallyConnected1D as LocallyConnected1D
|
||||
from keras.layers.locally_connected.locally_connected2d import LocallyConnected2D as LocallyConnected2D
|
||||
from keras.layers.merging.add import Add as Add, add as add
|
||||
from keras.layers.merging.average import Average as Average, average as average
|
||||
from keras.layers.merging.concatenate import Concatenate as Concatenate, concatenate as concatenate
|
||||
from keras.layers.merging.dot import Dot as Dot, dot as dot
|
||||
from keras.layers.merging.maximum import Maximum as Maximum, maximum as maximum
|
||||
from keras.layers.merging.minimum import Minimum as Minimum, minimum as minimum
|
||||
from keras.layers.merging.multiply import Multiply as Multiply, multiply as multiply
|
||||
from keras.layers.merging.subtract import Subtract as Subtract, subtract as subtract
|
||||
from keras.layers.normalization.batch_normalization import BatchNormalization as BatchNormalization
|
||||
from keras.layers.normalization.layer_normalization import LayerNormalization as LayerNormalization
|
||||
from keras.layers.normalization.unit_normalization import UnitNormalization as UnitNormalization
|
||||
from keras.layers.pooling.average_pooling1d import AveragePooling1D as AveragePooling1D
|
||||
from keras.layers.pooling.average_pooling2d import AveragePooling2D as AveragePooling2D
|
||||
from keras.layers.pooling.average_pooling3d import AveragePooling3D as AveragePooling3D
|
||||
from keras.layers.pooling.global_average_pooling1d import GlobalAveragePooling1D as GlobalAveragePooling1D
|
||||
from keras.layers.pooling.global_average_pooling2d import GlobalAveragePooling2D as GlobalAveragePooling2D
|
||||
from keras.layers.pooling.global_average_pooling3d import GlobalAveragePooling3D as GlobalAveragePooling3D
|
||||
from keras.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D as GlobalMaxPooling1D
|
||||
from keras.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D as GlobalMaxPooling2D
|
||||
from keras.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D as GlobalMaxPooling3D
|
||||
from keras.layers.pooling.max_pooling1d import MaxPooling1D as MaxPooling1D
|
||||
from keras.layers.pooling.max_pooling2d import MaxPooling2D as MaxPooling2D
|
||||
from keras.layers.pooling.max_pooling3d import MaxPooling3D as MaxPooling3D
|
||||
from keras.layers.preprocessing.category_encoding import CategoryEncoding as CategoryEncoding
|
||||
from keras.layers.preprocessing.discretization import Discretization as Discretization
|
||||
from keras.layers.preprocessing.hashing import Hashing as Hashing
|
||||
from keras.layers.preprocessing.image_preprocessing import CenterCrop as CenterCrop, RandomBrightness as RandomBrightness, RandomContrast as RandomContrast, RandomCrop as RandomCrop, RandomFlip as RandomFlip, RandomHeight as RandomHeight, RandomRotation as RandomRotation, RandomTranslation as RandomTranslation, RandomWidth as RandomWidth, RandomZoom as RandomZoom, Rescaling as Rescaling, Resizing as Resizing
|
||||
from keras.layers.preprocessing.integer_lookup import IntegerLookup as IntegerLookup
|
||||
from keras.layers.preprocessing.normalization import Normalization as Normalization
|
||||
from keras.layers.preprocessing.string_lookup import StringLookup as StringLookup
|
||||
from keras.layers.preprocessing.text_vectorization import TextVectorization as TextVectorization
|
||||
from keras.layers.regularization.activity_regularization import ActivityRegularization as ActivityRegularization
|
||||
from keras.layers.regularization.alpha_dropout import AlphaDropout as AlphaDropout
|
||||
from keras.layers.regularization.dropout import Dropout as Dropout
|
||||
from keras.layers.regularization.gaussian_dropout import GaussianDropout as GaussianDropout
|
||||
from keras.layers.regularization.gaussian_noise import GaussianNoise as GaussianNoise
|
||||
from keras.layers.regularization.spatial_dropout1d import SpatialDropout1D as SpatialDropout1D
|
||||
from keras.layers.regularization.spatial_dropout2d import SpatialDropout2D as SpatialDropout2D
|
||||
from keras.layers.regularization.spatial_dropout3d import SpatialDropout3D as SpatialDropout3D
|
||||
from keras.layers.reshaping.cropping1d import Cropping1D as Cropping1D
|
||||
from keras.layers.reshaping.cropping2d import Cropping2D as Cropping2D
|
||||
from keras.layers.reshaping.cropping3d import Cropping3D as Cropping3D
|
||||
from keras.layers.reshaping.flatten import Flatten as Flatten
|
||||
from keras.layers.reshaping.permute import Permute as Permute
|
||||
from keras.layers.reshaping.repeat_vector import RepeatVector as RepeatVector
|
||||
from keras.layers.reshaping.reshape import Reshape as Reshape
|
||||
from keras.layers.reshaping.up_sampling1d import UpSampling1D as UpSampling1D
|
||||
from keras.layers.reshaping.up_sampling2d import UpSampling2D as UpSampling2D
|
||||
from keras.layers.reshaping.up_sampling3d import UpSampling3D as UpSampling3D
|
||||
from keras.layers.reshaping.zero_padding1d import ZeroPadding1D as ZeroPadding1D
|
||||
from keras.layers.reshaping.zero_padding2d import ZeroPadding2D as ZeroPadding2D
|
||||
from keras.layers.reshaping.zero_padding3d import ZeroPadding3D as ZeroPadding3D
|
||||
from keras.layers.rnn.abstract_rnn_cell import AbstractRNNCell as AbstractRNNCell
|
||||
from keras.layers.rnn.base_rnn import RNN as RNN
|
||||
from keras.layers.rnn.base_wrapper import Wrapper as Wrapper
|
||||
from keras.layers.rnn.bidirectional import Bidirectional as Bidirectional
|
||||
from keras.layers.rnn.conv_lstm1d import ConvLSTM1D as ConvLSTM1D
|
||||
from keras.layers.rnn.conv_lstm2d import ConvLSTM2D as ConvLSTM2D
|
||||
from keras.layers.rnn.conv_lstm3d import ConvLSTM3D as ConvLSTM3D
|
||||
from keras.layers.rnn.gru import GRU as GRU, GRUCell as GRUCell
|
||||
from keras.layers.rnn.lstm import LSTM as LSTM, LSTMCell as LSTMCell
|
||||
from keras.layers.rnn.simple_rnn import SimpleRNN as SimpleRNN, SimpleRNNCell as SimpleRNNCell
|
||||
from keras.layers.rnn.stacked_rnn_cells import StackedRNNCells as StackedRNNCells
|
||||
from keras.layers.rnn.time_distributed import TimeDistributed as TimeDistributed
|
||||
from keras.layers.serialization import deserialize as deserialize, serialize as serialize
|
4
tensorflow-stubs/keras/layers/experimental/__init__.pyi
Normal file
4
tensorflow-stubs/keras/layers/experimental/__init__.pyi
Normal file
@ -0,0 +1,4 @@
|
||||
from keras.api._v2.keras.layers.experimental import preprocessing as preprocessing
|
||||
from keras.layers.core.einsum_dense import EinsumDense as EinsumDense
|
||||
from keras.layers.kernelized import RandomFourierFeatures as RandomFourierFeatures
|
||||
from keras.layers.normalization.batch_normalization import SyncBatchNormalization as SyncBatchNormalization
|
@ -0,0 +1,10 @@
|
||||
from keras.engine.base_preprocessing_layer import PreprocessingLayer as PreprocessingLayer
|
||||
from keras.layers.preprocessing.category_encoding import CategoryEncoding as CategoryEncoding
|
||||
from keras.layers.preprocessing.discretization import Discretization as Discretization
|
||||
from keras.layers.preprocessing.hashed_crossing import HashedCrossing as HashedCrossing
|
||||
from keras.layers.preprocessing.hashing import Hashing as Hashing
|
||||
from keras.layers.preprocessing.image_preprocessing import CenterCrop as CenterCrop, RandomContrast as RandomContrast, RandomCrop as RandomCrop, RandomFlip as RandomFlip, RandomHeight as RandomHeight, RandomRotation as RandomRotation, RandomTranslation as RandomTranslation, RandomWidth as RandomWidth, RandomZoom as RandomZoom, Rescaling as Rescaling, Resizing as Resizing
|
||||
from keras.layers.preprocessing.integer_lookup import IntegerLookup as IntegerLookup
|
||||
from keras.layers.preprocessing.normalization import Normalization as Normalization
|
||||
from keras.layers.preprocessing.string_lookup import StringLookup as StringLookup
|
||||
from keras.layers.preprocessing.text_vectorization import TextVectorization as TextVectorization
|
1
tensorflow-stubs/keras/losses/__init__.pyi
Normal file
1
tensorflow-stubs/keras/losses/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.losses import BinaryCrossentropy as BinaryCrossentropy, BinaryFocalCrossentropy as BinaryFocalCrossentropy, CategoricalCrossentropy as CategoricalCrossentropy, CategoricalHinge as CategoricalHinge, CosineSimilarity as CosineSimilarity, Hinge as Hinge, Huber as Huber, KLDivergence as KLDivergence, LogCosh as LogCosh, Loss as Loss, MeanAbsoluteError as MeanAbsoluteError, MeanAbsolutePercentageError as MeanAbsolutePercentageError, MeanSquaredError as MeanSquaredError, MeanSquaredLogarithmicError as MeanSquaredLogarithmicError, Poisson as Poisson, SparseCategoricalCrossentropy as SparseCategoricalCrossentropy, SquaredHinge as SquaredHinge, binary_crossentropy as binary_crossentropy, binary_focal_crossentropy as binary_focal_crossentropy, categorical_crossentropy as categorical_crossentropy, categorical_hinge as categorical_hinge, cosine_similarity as cosine_similarity, deserialize as deserialize, get as get, hinge as hinge, huber as huber, kl_divergence as kl_divergence, log_cosh as log_cosh, mean_absolute_error as mean_absolute_error, mean_absolute_percentage_error as mean_absolute_percentage_error, mean_squared_error as mean_squared_error, mean_squared_logarithmic_error as mean_squared_logarithmic_error, poisson as poisson, serialize as serialize, sparse_categorical_crossentropy as sparse_categorical_crossentropy, squared_hinge as squared_hinge
|
4
tensorflow-stubs/keras/metrics/__init__.pyi
Normal file
4
tensorflow-stubs/keras/metrics/__init__.pyi
Normal file
@ -0,0 +1,4 @@
|
||||
from keras.losses import binary_crossentropy as binary_crossentropy, binary_focal_crossentropy as binary_focal_crossentropy, categorical_crossentropy as categorical_crossentropy, hinge as hinge, kl_divergence as kl_divergence, log_cosh as log_cosh, mean_absolute_error as mean_absolute_error, mean_absolute_percentage_error as mean_absolute_percentage_error, mean_squared_error as mean_squared_error, mean_squared_logarithmic_error as mean_squared_logarithmic_error, poisson as poisson, sparse_categorical_crossentropy as sparse_categorical_crossentropy, squared_hinge as squared_hinge
|
||||
from keras.metrics import deserialize as deserialize, get as get, serialize as serialize
|
||||
from keras.metrics.base_metric import Mean as Mean, MeanMetricWrapper as MeanMetricWrapper, MeanTensor as MeanTensor, Metric as Metric, Sum as Sum
|
||||
from keras.metrics.metrics import AUC as AUC, Accuracy as Accuracy, BinaryAccuracy as BinaryAccuracy, BinaryCrossentropy as BinaryCrossentropy, BinaryIoU as BinaryIoU, CategoricalAccuracy as CategoricalAccuracy, CategoricalCrossentropy as CategoricalCrossentropy, CategoricalHinge as CategoricalHinge, CosineSimilarity as CosineSimilarity, FalseNegatives as FalseNegatives, FalsePositives as FalsePositives, Hinge as Hinge, IoU as IoU, KLDivergence as KLDivergence, LogCoshError as LogCoshError, MeanAbsoluteError as MeanAbsoluteError, MeanAbsolutePercentageError as MeanAbsolutePercentageError, MeanIoU as MeanIoU, MeanRelativeError as MeanRelativeError, MeanSquaredError as MeanSquaredError, MeanSquaredLogarithmicError as MeanSquaredLogarithmicError, OneHotIoU as OneHotIoU, OneHotMeanIoU as OneHotMeanIoU, Poisson as Poisson, Precision as Precision, PrecisionAtRecall as PrecisionAtRecall, Recall as Recall, RecallAtPrecision as RecallAtPrecision, RootMeanSquaredError as RootMeanSquaredError, SensitivityAtSpecificity as SensitivityAtSpecificity, SparseCategoricalAccuracy as SparseCategoricalAccuracy, SparseCategoricalCrossentropy as SparseCategoricalCrossentropy, SparseTopKCategoricalAccuracy as SparseTopKCategoricalAccuracy, SpecificityAtSensitivity as SpecificityAtSensitivity, SquaredHinge as SquaredHinge, TopKCategoricalAccuracy as TopKCategoricalAccuracy, TrueNegatives as TrueNegatives, TruePositives as TruePositives, binary_accuracy as binary_accuracy, categorical_accuracy as categorical_accuracy, sparse_categorical_accuracy as sparse_categorical_accuracy, sparse_top_k_categorical_accuracy as sparse_top_k_categorical_accuracy, top_k_categorical_accuracy as top_k_categorical_accuracy
|
1
tensorflow-stubs/keras/mixed_precision/__init__.pyi
Normal file
1
tensorflow-stubs/keras/mixed_precision/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.mixed_precision.policy import Policy as Policy, global_policy as global_policy, set_global_policy as set_global_policy
|
6
tensorflow-stubs/keras/models/__init__.pyi
Normal file
6
tensorflow-stubs/keras/models/__init__.pyi
Normal file
@ -0,0 +1,6 @@
|
||||
from keras.api._v2.keras.models import experimental as experimental
|
||||
from keras.engine.sequential import Sequential as Sequential
|
||||
from keras.engine.training import Model as Model
|
||||
from keras.models.cloning import clone_model as clone_model
|
||||
from keras.saving.model_config import model_from_config as model_from_config, model_from_json as model_from_json, model_from_yaml as model_from_yaml
|
||||
from keras.saving.save import load_model as load_model, save_model as save_model
|
1
tensorflow-stubs/keras/models/experimental/__init__.pyi
Normal file
1
tensorflow-stubs/keras/models/experimental/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.models.sharpness_aware_minimization import SharpnessAwareMinimization as SharpnessAwareMinimization
|
10
tensorflow-stubs/keras/optimizers/__init__.pyi
Normal file
10
tensorflow-stubs/keras/optimizers/__init__.pyi
Normal file
@ -0,0 +1,10 @@
|
||||
from keras.api._v2.keras.optimizers import experimental as experimental, legacy as legacy, schedules as schedules
|
||||
from keras.optimizers import deserialize as deserialize, get as get, serialize as serialize
|
||||
from keras.optimizers.optimizer_v2.adadelta import Adadelta as Adadelta
|
||||
from keras.optimizers.optimizer_v2.adagrad import Adagrad as Adagrad
|
||||
from keras.optimizers.optimizer_v2.adam import Adam as Adam
|
||||
from keras.optimizers.optimizer_v2.adamax import Adamax as Adamax
|
||||
from keras.optimizers.optimizer_v2.ftrl import Ftrl as Ftrl
|
||||
from keras.optimizers.optimizer_v2.gradient_descent import SGD as SGD
|
||||
from keras.optimizers.optimizer_v2.nadam import Nadam as Nadam
|
||||
from keras.optimizers.optimizer_v2.rmsprop import RMSprop as RMSprop
|
10
tensorflow-stubs/keras/optimizers/experimental/__init__.pyi
Normal file
10
tensorflow-stubs/keras/optimizers/experimental/__init__.pyi
Normal file
@ -0,0 +1,10 @@
|
||||
from keras.optimizers.optimizer_experimental.adadelta import Adadelta as Adadelta
|
||||
from keras.optimizers.optimizer_experimental.adagrad import Adagrad as Adagrad
|
||||
from keras.optimizers.optimizer_experimental.adam import Adam as Adam
|
||||
from keras.optimizers.optimizer_experimental.adamax import Adamax as Adamax
|
||||
from keras.optimizers.optimizer_experimental.adamw import AdamW as AdamW
|
||||
from keras.optimizers.optimizer_experimental.ftrl import Ftrl as Ftrl
|
||||
from keras.optimizers.optimizer_experimental.nadam import Nadam as Nadam
|
||||
from keras.optimizers.optimizer_experimental.optimizer import Optimizer as Optimizer
|
||||
from keras.optimizers.optimizer_experimental.rmsprop import RMSprop as RMSprop
|
||||
from keras.optimizers.optimizer_experimental.sgd import SGD as SGD
|
9
tensorflow-stubs/keras/optimizers/legacy/__init__.pyi
Normal file
9
tensorflow-stubs/keras/optimizers/legacy/__init__.pyi
Normal file
@ -0,0 +1,9 @@
|
||||
from keras.optimizers.legacy.adadelta import Adadelta as Adadelta
|
||||
from keras.optimizers.legacy.adagrad import Adagrad as Adagrad
|
||||
from keras.optimizers.legacy.adam import Adam as Adam
|
||||
from keras.optimizers.legacy.adamax import Adamax as Adamax
|
||||
from keras.optimizers.legacy.ftrl import Ftrl as Ftrl
|
||||
from keras.optimizers.legacy.nadam import Nadam as Nadam
|
||||
from keras.optimizers.legacy.optimizer import Optimizer as Optimizer
|
||||
from keras.optimizers.legacy.rmsprop import RMSprop as RMSprop
|
||||
from keras.optimizers.legacy.sgd import SGD as SGD
|
1
tensorflow-stubs/keras/optimizers/schedules/__init__.pyi
Normal file
1
tensorflow-stubs/keras/optimizers/schedules/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.optimizers.schedules.learning_rate_schedule import CosineDecay as CosineDecay, CosineDecayRestarts as CosineDecayRestarts, ExponentialDecay as ExponentialDecay, InverseTimeDecay as InverseTimeDecay, LearningRateSchedule as LearningRateSchedule, PiecewiseConstantDecay as PiecewiseConstantDecay, PolynomialDecay as PolynomialDecay, deserialize as deserialize, serialize as serialize
|
0
tensorflow-stubs/keras/premade/__init__.pyi
Normal file
0
tensorflow-stubs/keras/premade/__init__.pyi
Normal file
4
tensorflow-stubs/keras/preprocessing/__init__.pyi
Normal file
4
tensorflow-stubs/keras/preprocessing/__init__.pyi
Normal file
@ -0,0 +1,4 @@
|
||||
from keras.api._v2.keras.preprocessing import image as image, sequence as sequence, text as text
|
||||
from keras.utils.image_dataset import image_dataset_from_directory as image_dataset_from_directory
|
||||
from keras.utils.text_dataset import text_dataset_from_directory as text_dataset_from_directory
|
||||
from keras.utils.timeseries_dataset import timeseries_dataset_from_array as timeseries_dataset_from_array
|
2
tensorflow-stubs/keras/preprocessing/image/__init__.pyi
Normal file
2
tensorflow-stubs/keras/preprocessing/image/__init__.pyi
Normal file
@ -0,0 +1,2 @@
|
||||
from keras.preprocessing.image import DirectoryIterator as DirectoryIterator, ImageDataGenerator as ImageDataGenerator, Iterator as Iterator, NumpyArrayIterator as NumpyArrayIterator, apply_affine_transform as apply_affine_transform, apply_brightness_shift as apply_brightness_shift, apply_channel_shift as apply_channel_shift, random_brightness as random_brightness, random_channel_shift as random_channel_shift, random_rotation as random_rotation, random_shear as random_shear, random_shift as random_shift, random_zoom as random_zoom
|
||||
from keras.utils.image_utils import array_to_img as array_to_img, img_to_array as img_to_array, load_img as load_img, save_img as save_img, smart_resize as smart_resize
|
@ -0,0 +1,2 @@
|
||||
from keras.preprocessing.sequence import TimeseriesGenerator as TimeseriesGenerator, make_sampling_table as make_sampling_table, skipgrams as skipgrams
|
||||
from keras.utils.data_utils import pad_sequences as pad_sequences
|
1
tensorflow-stubs/keras/preprocessing/text/__init__.pyi
Normal file
1
tensorflow-stubs/keras/preprocessing/text/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.preprocessing.text import Tokenizer as Tokenizer, hashing_trick as hashing_trick, one_hot as one_hot, text_to_word_sequence as text_to_word_sequence, tokenizer_from_json as tokenizer_from_json
|
1
tensorflow-stubs/keras/regularizers/__init__.pyi
Normal file
1
tensorflow-stubs/keras/regularizers/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.regularizers import L1 as L1, L1L2 as L1L2, L2 as L2, OrthogonalRegularizer as OrthogonalRegularizer, Regularizer as Regularizer, deserialize as deserialize, get as get, l1_l2 as l1_l2, serialize as serialize
|
16
tensorflow-stubs/keras/utils/__init__.pyi
Normal file
16
tensorflow-stubs/keras/utils/__init__.pyi
Normal file
@ -0,0 +1,16 @@
|
||||
from keras.api._v2.keras.utils import experimental as experimental
|
||||
from keras.distribute.sidecar_evaluator import SidecarEvaluator as SidecarEvaluator
|
||||
from keras.engine.data_adapter import pack_x_y_sample_weight as pack_x_y_sample_weight, unpack_x_y_sample_weight as unpack_x_y_sample_weight
|
||||
from keras.utils.audio_dataset import audio_dataset_from_directory as audio_dataset_from_directory
|
||||
from keras.utils.data_utils import GeneratorEnqueuer as GeneratorEnqueuer, OrderedEnqueuer as OrderedEnqueuer, Sequence as Sequence, SequenceEnqueuer as SequenceEnqueuer, get_file as get_file, pad_sequences as pad_sequences
|
||||
from keras.utils.dataset_utils import split_dataset as split_dataset
|
||||
from keras.utils.generic_utils import CustomObjectScope as CustomObjectScope, Progbar as Progbar, deserialize_keras_object as deserialize_keras_object, get_custom_objects as get_custom_objects, get_registered_name as get_registered_name, get_registered_object as get_registered_object, register_keras_serializable as register_keras_serializable, serialize_keras_object as serialize_keras_object
|
||||
from keras.utils.image_dataset import image_dataset_from_directory as image_dataset_from_directory
|
||||
from keras.utils.image_utils import array_to_img as array_to_img, img_to_array as img_to_array, load_img as load_img, save_img as save_img
|
||||
from keras.utils.io_utils import disable_interactive_logging as disable_interactive_logging, enable_interactive_logging as enable_interactive_logging, is_interactive_logging_enabled as is_interactive_logging_enabled
|
||||
from keras.utils.layer_utils import get_source_inputs as get_source_inputs
|
||||
from keras.utils.np_utils import normalize as normalize, to_categorical as to_categorical
|
||||
from keras.utils.text_dataset import text_dataset_from_directory as text_dataset_from_directory
|
||||
from keras.utils.tf_utils import set_random_seed as set_random_seed
|
||||
from keras.utils.timeseries_dataset import timeseries_dataset_from_array as timeseries_dataset_from_array
|
||||
from keras.utils.vis_utils import model_to_dot as model_to_dot, plot_model as plot_model
|
1
tensorflow-stubs/keras/utils/experimental/__init__.pyi
Normal file
1
tensorflow-stubs/keras/utils/experimental/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.utils.dataset_creator import DatasetCreator as DatasetCreator
|
1
tensorflow-stubs/keras/wrappers/__init__.pyi
Normal file
1
tensorflow-stubs/keras/wrappers/__init__.pyi
Normal file
@ -0,0 +1 @@
|
||||
from keras.api._v2.keras.wrappers import scikit_learn as scikit_learn
|
@ -0,0 +1 @@
|
||||
from keras.wrappers.scikit_learn import KerasClassifier as KerasClassifier, KerasRegressor as KerasRegressor
|
1135
tensorflow-stubs/math/__init__.pyi
Executable file
1135
tensorflow-stubs/math/__init__.pyi
Executable file
File diff suppressed because it is too large
Load Diff
1
tensorflow-stubs/py.typed
Normal file
1
tensorflow-stubs/py.typed
Normal file
@ -0,0 +1 @@
|
||||
partial
|
3
tensorflow-stubs/python/__init__.pyi
Normal file
3
tensorflow-stubs/python/__init__.pyi
Normal file
@ -0,0 +1,3 @@
|
||||
from typing import Any
|
||||
|
||||
pywrap_tensorflow: Any
|
3
tensorflow-stubs/python/client/__init__.pyi
Normal file
3
tensorflow-stubs/python/client/__init__.pyi
Normal file
@ -0,0 +1,3 @@
|
||||
from typing import Any
|
||||
|
||||
pywrap_tensorflow: Any
|
3
tensorflow-stubs/python/ops/__init__.pyi
Normal file
3
tensorflow-stubs/python/ops/__init__.pyi
Normal file
@ -0,0 +1,3 @@
|
||||
from typing import Any
|
||||
|
||||
math_ops: Any
|
3
tensorflow-stubs/python/util/__init__.pyi
Normal file
3
tensorflow-stubs/python/util/__init__.pyi
Normal file
@ -0,0 +1,3 @@
|
||||
from typing import Any
|
||||
|
||||
compat = Any
|
10
tensorflow-stubs/train/__init__.pyi
Normal file
10
tensorflow-stubs/train/__init__.pyi
Normal file
@ -0,0 +1,10 @@
|
||||
from typing import Any
|
||||
|
||||
summary_iterator: Any
|
||||
AdamOptimizer: Any
|
||||
Coordinator: Any
|
||||
ExponentialMovingAverage: Any
|
||||
MomentumOptimizer: Any
|
||||
RMSPropOptimizer: Any
|
||||
QueueRunner: Any
|
||||
TrackableView: Any
|
0
tf_container/__init__.py
Normal file
0
tf_container/__init__.py
Normal file
@ -5,25 +5,39 @@ import os
|
||||
# import container_support as cs
|
||||
import argparse
|
||||
import json
|
||||
import pathlib
|
||||
import typing
|
||||
|
||||
import numpy as np
|
||||
import re
|
||||
import tensorflow as tf
|
||||
import zipfile
|
||||
# from tensorflow.keras import backend as K
|
||||
|
||||
import tensorflow.keras.losses
|
||||
|
||||
from tensorflow.keras import callbacks
|
||||
from tensorflow.keras.layers import Convolution2D
|
||||
from tensorflow.keras.layers import Conv2D
|
||||
from tensorflow.keras.layers import Dropout, Flatten, Dense
|
||||
from tensorflow.keras.layers import Input
|
||||
from tensorflow.keras.layers import Layer
|
||||
from tensorflow.keras.models import Model
|
||||
from tensorflow.keras.preprocessing.image import load_img, img_to_array
|
||||
from tensorflow.python.client import device_lib
|
||||
from numpy import typing as npt
|
||||
|
||||
MODEL_CATEGORICAL = "categorical"
|
||||
MODEL_LINEAR = "linear"
|
||||
|
||||
|
||||
def linear_bin(a: float, N: int = 15, offset: int = 1, R: float = 2.0):
|
||||
def linear_bin_speed_zone(a: int, N: int = 4) -> npt.NDArray[np.float64]:
|
||||
"""
|
||||
create a bin of length N
|
||||
"""
|
||||
arr = np.zeros(N)
|
||||
arr[a] = 1
|
||||
return arr
|
||||
|
||||
|
||||
def linear_bin(a: float, N: int = 15, offset: int = 1, R: float = 2.0) -> npt.NDArray[np.float64]:
|
||||
"""
|
||||
create a bin of length N
|
||||
map val A to range R
|
||||
@ -37,7 +51,7 @@ def linear_bin(a: float, N: int = 15, offset: int = 1, R: float = 2.0):
|
||||
return arr
|
||||
|
||||
|
||||
def clamp(n, min, max):
|
||||
def clamp(n: int, min: int, max: int) -> int:
|
||||
if n <= min:
|
||||
return min
|
||||
if n >= max:
|
||||
@ -45,25 +59,31 @@ def clamp(n, min, max):
|
||||
return n
|
||||
|
||||
|
||||
def get_data(root_dir, filename):
|
||||
print('load data from file ' + filename)
|
||||
def get_data(root_dir: pathlib.Path, filename: str) -> typing.List[typing.Any]:
|
||||
# print('load data from file ' + filename)
|
||||
d = json.load(open(os.path.join(root_dir, filename)))
|
||||
return [(d['user/angle']), root_dir, d['cam/image_array']]
|
||||
|
||||
|
||||
def get_data_speed_zone(root_dir, filename):
|
||||
print('load data from file ' + filename)
|
||||
d = json.load(open(os.path.join(root_dir, filename)))
|
||||
return [(d['speed_zone']), root_dir, d['cam/image_array']]
|
||||
|
||||
|
||||
numbers = re.compile(r'(\d+)')
|
||||
|
||||
|
||||
def unzip_file(root, f):
|
||||
def unzip_file(root: pathlib.Path, f: str) -> None:
|
||||
zip_ref = zipfile.ZipFile(os.path.join(root, f), 'r')
|
||||
zip_ref.extractall(root)
|
||||
zip_ref.close()
|
||||
|
||||
|
||||
def train(model_type: str, batch_size: int, slide_size: int, img_height: int, img_width: int, img_depth: int, horizon: int, drop: float):
|
||||
def train(model_type: str, record_field: str, batch_size: int, slide_size: int, img_height: int, img_width: int,
|
||||
img_depth: int, horizon: int, drop: float) -> None:
|
||||
# env = cs.TrainingEnvironment()
|
||||
|
||||
print(device_lib.list_local_devices())
|
||||
os.system('mkdir -p logs')
|
||||
|
||||
# ### Loading the files ###
|
||||
@ -75,27 +95,39 @@ def train(model_type: str, batch_size: int, slide_size: int, img_height: int, im
|
||||
for root, dirs, files in os.walk('/opt/ml/input/data/train'):
|
||||
for f in files:
|
||||
if f.endswith('.zip'):
|
||||
unzip_file(root, f)
|
||||
unzip_file(pathlib.Path(root), f)
|
||||
|
||||
if record_field == 'angle':
|
||||
output_name = 'angle_out'
|
||||
for root, dirs, files in os.walk('/opt/ml/input/data/train'):
|
||||
data.extend(
|
||||
[get_data(root, f) for f in sorted(files, key=str.lower) if f.startswith('record') and f.endswith('.json')])
|
||||
elif record_field == 'speed_zone':
|
||||
output_name = 'speed_zone_output'
|
||||
for root, dirs, files in os.walk('/opt/ml/input/data/train'):
|
||||
data.extend(
|
||||
[get_data_speed_zone(root, f) for f in sorted(files, key=str.lower) if f.startswith('record') and f.endswith('.json')])
|
||||
else:
|
||||
print(f"invalid record filed: {record_field}")
|
||||
return
|
||||
|
||||
# ### Loading throttle and angle ###
|
||||
# ### Loading values (angle or speed_zone) ###
|
||||
|
||||
angle = [d[0] for d in data]
|
||||
angle_array = np.array(angle)
|
||||
value = [d[0] for d in data]
|
||||
value_array = np.array(value)
|
||||
|
||||
# ### Loading images ###
|
||||
if horizon > 0:
|
||||
images = np.array([img_to_array(load_img(os.path.join(d[1], d[2])).crop((0, horizon, img_width, img_height))) for d in data], 'f')
|
||||
images = np.array(
|
||||
[img_to_array(load_img(os.path.join(d[1], d[2])).crop((0, horizon, img_width, img_height))) for d in data],
|
||||
'f')
|
||||
else:
|
||||
images = np.array([img_to_array(load_img(os.path.join(d[1], d[2]))) for d in data], 'f')
|
||||
|
||||
# slide images vs orders
|
||||
if slide_size > 0:
|
||||
images = images[:len(images) - slide_size]
|
||||
angle_array = angle_array[slide_size:]
|
||||
value_array = value_array[slide_size:]
|
||||
|
||||
# ### Start training ###
|
||||
from datetime import datetime
|
||||
@ -114,19 +146,28 @@ def train(model_type: str, batch_size: int, slide_size: int, img_height: int, im
|
||||
model_filepath = '/opt/ml/model/model_other'
|
||||
if model_type == MODEL_CATEGORICAL:
|
||||
model_filepath = '/opt/ml/model/model_cat'
|
||||
angle_cat_array = np.array([linear_bin(float(a)) for a in angle_array])
|
||||
model = default_categorical(input_shape=(img_height - horizon, img_width, img_depth), drop=drop)
|
||||
loss = {'angle_out': 'categorical_crossentropy', }
|
||||
if record_field == 'angle':
|
||||
input_value_array = np.array([linear_bin(float(a)) for a in value_array])
|
||||
output_bin = 15
|
||||
elif record_field == 'speed_zone':
|
||||
input_value_array = np.array([linear_bin_speed_zone(a) for a in value_array])
|
||||
output_bin = 4
|
||||
model = default_categorical(input_shape=(img_height - horizon, img_width, img_depth), drop=drop,
|
||||
output_name=output_name, output_bin=output_bin)
|
||||
loss = {output_name: 'categorical_crossentropy', }
|
||||
optimizer = 'adam'
|
||||
elif model_type == MODEL_LINEAR:
|
||||
model_filepath = '/opt/ml/model/model_lin'
|
||||
angle_cat_array = np.array([a for a in angle_array])
|
||||
model = default_linear(input_shape=(img_height - horizon, img_width, img_depth), drop=drop)
|
||||
input_value_array = np.array([a for a in value_array])
|
||||
model = default_linear(input_shape=(img_height - horizon, img_width, img_depth), drop=drop, output_name=output_name)
|
||||
loss = 'mse'
|
||||
optimizer = 'rmsprop'
|
||||
else:
|
||||
raise Exception("invalid model type")
|
||||
|
||||
# Display the model's architecture
|
||||
model.summary()
|
||||
|
||||
save_best = callbacks.ModelCheckpoint(model_filepath, monitor='val_loss', verbose=1,
|
||||
save_best_only=True, mode='min')
|
||||
early_stop = callbacks.EarlyStopping(monitor='val_loss',
|
||||
@ -140,13 +181,13 @@ def train(model_type: str, batch_size: int, slide_size: int, img_height: int, im
|
||||
|
||||
model.compile(optimizer=optimizer,
|
||||
loss=loss,)
|
||||
model.fit({'img_in': images}, {'angle_out': angle_cat_array, }, batch_size=batch_size,
|
||||
model.fit({'img_in': images}, {output_name: input_value_array, }, batch_size=batch_size,
|
||||
epochs=100, verbose=1, validation_split=0.2, shuffle=True, callbacks=callbacks_list)
|
||||
|
||||
# Save model for tensorflow using
|
||||
model.save("/opt/ml/model/tfModel", save_format="tf")
|
||||
model.save(f'/opt/ml/model/model_{record_field.replace("_", "")}_{model_type}_{str(img_width)}x{str(img_height)}h{str(horizon)}')
|
||||
|
||||
def representative_dataset():
|
||||
def representative_dataset() -> typing.Generator[typing.List[float], typing.Any, None]:
|
||||
for d in tf.data.Dataset.from_tensor_slices(images).batch(1).take(100):
|
||||
yield [tf.dtypes.cast(d, tf.float32)]
|
||||
|
||||
@ -163,12 +204,13 @@ def train(model_type: str, batch_size: int, slide_size: int, img_height: int, im
|
||||
tflite_model = converter.convert()
|
||||
|
||||
# Save the model.
|
||||
with open('/opt/ml/model/model_' + model_type + '_' + str(img_width) + 'x' + str(img_height) + 'h' + str(horizon) + '.tflite',
|
||||
with open(f'/opt/ml/model/model_{record_field.replace("_", "")}_{model_type}_{str(img_width)}x{str(img_height)}h{str(horizon)}.tflite',
|
||||
'wb') as f:
|
||||
f.write(tflite_model)
|
||||
|
||||
|
||||
def conv2d(filters, kernel, strides, layer_num, activation='relu'):
|
||||
def conv2d(filters: float, kernel: typing.Union[int, typing.Tuple[int, int]], strides: typing.Union[int, typing.Tuple[int, int]], layer_num: int,
|
||||
activation: str = 'relu') -> Conv2D:
|
||||
"""
|
||||
Helper function to create a standard valid-padded convolutional layer
|
||||
with square kernel and strides and unified naming convention
|
||||
@ -179,14 +221,14 @@ def conv2d(filters, kernel, strides, layer_num, activation='relu'):
|
||||
:param activation: activation, defaults to relu
|
||||
:return: tf.keras Convolution2D layer
|
||||
"""
|
||||
return Convolution2D(filters=filters,
|
||||
return Conv2D(filters=filters,
|
||||
kernel_size=(kernel, kernel),
|
||||
strides=(strides, strides),
|
||||
activation=activation,
|
||||
name='conv2d_' + str(layer_num))
|
||||
|
||||
|
||||
def core_cnn_layers(img_in: Input, img_height: int, img_width: int, drop: float, l4_stride: int = 1):
|
||||
def core_cnn_layers(img_in: Input, img_height: int, img_width: int, drop: float, l4_stride: int = 1) -> Layer:
|
||||
"""
|
||||
Returns the core CNN layers that are shared among the different models,
|
||||
like linear, imu, behavioural
|
||||
@ -198,7 +240,7 @@ def core_cnn_layers(img_in: Input, img_height: int, img_width: int, drop: float,
|
||||
:return: stack of CNN layers
|
||||
"""
|
||||
x = img_in
|
||||
x = conv2d(img_height/5, 5, 2, 1)(x)
|
||||
x = conv2d(img_height / 5, 5, 2, 1)(x)
|
||||
x = Dropout(drop)(x)
|
||||
x = conv2d(img_width / 5, 5, 2, 2)(x)
|
||||
x = Dropout(drop)(x)
|
||||
@ -212,20 +254,22 @@ def core_cnn_layers(img_in: Input, img_height: int, img_width: int, drop: float,
|
||||
return x
|
||||
|
||||
|
||||
def default_linear(input_shape: typing.Tuple[int, int, int] = (120, 160, 3), drop: float = 0.2,
                   output_name: str = 'angle_out') -> Model:
    """Build the single-output linear regression model.

    :param input_shape: (height, width, channels) of the input image
    :param drop: dropout rate applied after each dense layer
    :param output_name: name given to the single linear output layer
    :return: tf.keras Model named 'linear'
    """
    img_in = Input(shape=input_shape, name='img_in')
    features = core_cnn_layers(img_in, img_width=input_shape[1],
                               img_height=input_shape[0], drop=drop)
    # Two relu dense heads, each followed by dropout, before the linear output.
    for units, layer_name in ((100, 'dense_1'), (50, 'dense_2')):
        features = Dense(units, activation='relu', name=layer_name)(features)
        features = Dropout(drop)(features)
    value_out = Dense(1, activation='linear', name=output_name)(features)
    return Model(inputs=[img_in], outputs=[value_out], name='linear')
|
||||
|
||||
|
||||
def default_categorical(input_shape=(120, 160, 3), drop=0.2):
|
||||
def default_categorical(input_shape: typing.Tuple[int, int, int] = (120, 160, 3), drop: float = 0.2,
|
||||
output_name: str ='angle_out', output_bin: int = 15) -> Model:
|
||||
img_in = Input(shape=input_shape, name='img_in')
|
||||
x = core_cnn_layers(img_in, img_width=input_shape[1], img_height=input_shape[0], drop=drop, l4_stride=2)
|
||||
x = Dense(100, activation='relu', name="dense_1")(x)
|
||||
@ -233,13 +277,13 @@ def default_categorical(input_shape=(120, 160, 3), drop=0.2):
|
||||
x = Dense(50, activation='relu', name="dense_2")(x)
|
||||
x = Dropout(drop)(x)
|
||||
# Categorical output of the angle into 15 bins
|
||||
angle_out = Dense(15, activation='softmax', name='angle_out')(x)
|
||||
value_out = Dense(output_bin, activation='softmax', name=output_name)(x)
|
||||
|
||||
model = Model(inputs=[img_in], outputs=[angle_out], name='categorical')
|
||||
model = Model(inputs=[img_in], outputs=[value_out], name='categorical')
|
||||
return model
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
def main() -> None:
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument("--slide_size", type=int, default=0)
|
||||
@ -250,11 +294,13 @@ if __name__ == "__main__":
|
||||
parser.add_argument("--batch_size", type=int, default=32)
|
||||
parser.add_argument("--drop", type=float, default=0.2)
|
||||
parser.add_argument("--model_type", type=str, default=MODEL_CATEGORICAL)
|
||||
parser.add_argument("--record_field", type=str, choices=['angle', 'speed_zone'], default='angle')
|
||||
|
||||
args = parser.parse_args()
|
||||
params = vars(args)
|
||||
train(
|
||||
model_type=params["model_type"],
|
||||
record_field=params["record_field"],
|
||||
batch_size=params["batch_size"],
|
||||
slide_size=params["slide_size"],
|
||||
img_height=params["img_height"],
|
||||
@ -263,3 +309,7 @@ if __name__ == "__main__":
|
||||
horizon=params["horizon"],
|
||||
drop=params["drop"],
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Script entry point: delegate to main(), which parses the CLI
    # arguments and launches training.
    main()
|
Loading…
Reference in New Issue
Block a user