```python
%load_ext tensorboard

# we need imgaug 0.4 for image augmentations to work properly, see
# https://stackoverflow.com/questions/62580797/in-colab-doing-image-data-augmentation-with-imgaug-is-not-working-as-intended
!pip uninstall -y imgaug && pip uninstall -y albumentations && pip install imgaug==0.4
!pip install --upgrade --no-cache-dir gdown

!git clone -b legacy-yolov2 https://github.com/AIWintermuteAI/aXeleRate.git

import sys
sys.path.append('/content/aXeleRate')
from axelerate import setup_training, setup_inference
```
config = { "model":{ "type": "Detector", "architecture": "MobileNet7_5", "input_size": 224, "anchors": [[[0.76120044, 0.57155991], [0.6923348, 0.88535553], [0.47163042, 0.34163313]]], "labels": ["licence"], "obj_thresh" : 0.7, "iou_thresh" : 0.5, "coord_scale" : 1.0, "class_scale" : 1.0, "object_scale" : 5.0, "no_object_scale" : 1.0 }, "weights" : { "full": "", "backend": "imagenet" }, "train" : { "actual_epoch": 100, "train_image_folder": "/content/MyDrive/MyDrive/ALPR/im", "train_annot_folder": "/content/MyDrive/MyDrive/ALPR/images", "train_times": 1, "valid_image_folder": "/content/MyDrive/MyDrive/ALPR/validation", "valid_annot_folder": "/content/MyDrive/MyDrive/ALPR/validation_annot", "valid_times": 1, "valid_metric": "recall", "batch_size": 4, "learning_rate": 1e-4, "saved_folder": F"/content/MyDrive/MyDrive/ALPR/ww44p3_result", "first_trainable_layer": "", "augmentation": True, "is_only_detect" : False }, "converter" : { "type": ["k210"] } }
Expected behavior: to be able to train the model.
```
ValueError                                Traceback (most recent call last)
<ipython-input-14-bbd5d1611139> in <cell line: 3>()
      1 from keras import backend as K
      2 K.clear_session()
----> 3 model_path = setup_training(config_dict=config)

7 frames
/content/aXeleRate/axelerate/train.py in setup_training(config_file, config_dict)
    163         os.makedirs(dirname)
    164
--> 165     return(train_from_config(config, dirname))
    166
    167

/content/aXeleRate/axelerate/train.py in train_from_config(config, project_folder)
    114
    115     # 1. Construct the model
--> 116     yolo = create_yolo(config['model']['architecture'],
    117                        labels,
    118                        input_size,

/content/aXeleRate/axelerate/networks/yolo/frontend.py in create_yolo(architecture, labels, input_size, anchors, coord_scale, class_scale, object_scale, no_object_scale, weights)
     34     n_classes = len(labels)
     35     n_boxes = int(len(anchors)/2)
---> 36     yolo_network = create_yolo_network(architecture, input_size, n_classes, n_boxes, weights)
     37     yolo_loss = YoloLoss(yolo_network.get_grid_size(),
     38                          n_classes,

/content/aXeleRate/axelerate/networks/yolo/backend/network.py in create_yolo_network(architecture, input_size, nb_classes, nb_box, weights)
     14                         weights):
     15     feature_extractor = create_feature_extractor(architecture, input_size, weights)
---> 16     yolo_net = YoloNetwork(feature_extractor,
     17                            input_size,
     18                            nb_classes,

/content/aXeleRate/axelerate/networks/yolo/backend/network.py in __init__(self, feature_extractor, input_size, nb_classes, nb_box)
     33
     34         # make the object detection layer
---> 35         output_tensor = Conv2D(nb_box * (4 + 1 + nb_classes), (1,1), strides=(1,1),
     36                                padding='same',
     37                                name='detection_layer_{}'.format(nb_box * (4 + 1 + nb_classes)),

/usr/local/lib/python3.10/dist-packages/keras/src/dtensor/utils.py in _wrap_function(layer_instance, *args, **kwargs)
     94             layout_args[variable_name + "_layout"] = layout
     95
---> 96         init_method(layer_instance, *args, **kwargs)
     97
     98         # Inject the layout parameter after the invocation of __init__()

/usr/local/lib/python3.10/dist-packages/keras/src/layers/convolutional/conv2d.py in __init__(self, filters, kernel_size, strides, padding, data_format, dilation_rate, groups, activation, use_bias, kernel_initializer, bias_initializer, kernel_regularizer, bias_regularizer, activity_regularizer, kernel_constraint, bias_constraint, **kwargs)
    177         **kwargs
    178     ):
--> 179         super().__init__(
    180             rank=2,
    181             filters=filters,

/usr/local/lib/python3.10/dist-packages/keras/src/layers/convolutional/base_conv.py in __init__(self, rank, filters, kernel_size, strides, padding, data_format, dilation_rate, groups, activation, use_bias, kernel_initializer, bias_initializer, kernel_regularizer, bias_regularizer, activity_regularizer, kernel_constraint, bias_constraint, trainable, name, conv_op, **kwargs)
    127         filters = int(filters)
    128         if filters is not None and filters <= 0:
--> 129             raise ValueError(
    130                 "Invalid value for argument `filters`. "
    131                 "Expected a strictly positive value. "

ValueError: Invalid value for argument `filters`. Expected a strictly positive value. Received filters=0.
```

https://colab.research.google.com/drive/1ZVO7KDgd_X1aEVd7Jh6gAFDNlQLxhCiJ#scrollTo=AjQngERURAWI
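Judging by the frames in the traceback, the legacy-yolov2 frontend computes `n_boxes = int(len(anchors)/2)`, which looks like it assumes a flat `[w1, h1, w2, h2, ...]` list. The nested `anchors` value in the config above has length 1, so `n_boxes`, and therefore the Conv2D `filters` count, collapses to 0. Below is a minimal sketch of that arithmetic, not aXeleRate code, just a reproduction of the failure and of what happens if the same anchors are flattened (the flat-list format is an assumption based on the `len(anchors)/2` line):

```python
# Reproduction of the arithmetic from the traceback frames (frontend.py / network.py).
nested_anchors = [[[0.76120044, 0.57155991],
                   [0.6923348, 0.88535553],
                   [0.47163042, 0.34163313]]]
labels = ["licence"]

n_classes = len(labels)                  # 1
n_boxes = int(len(nested_anchors) / 2)   # int(1 / 2) == 0  -> the problem
filters = n_boxes * (4 + 1 + n_classes)  # 0 -> ValueError in Conv2D

# If the branch indeed expects a flat [w1, h1, w2, h2, ...] list (assumption),
# flattening the same anchors gives a valid, non-zero filter count:
flat_anchors = [v for pair in nested_anchors[0] for v in pair]
# [0.76120044, 0.57155991, 0.6923348, 0.88535553, 0.47163042, 0.34163313]
n_boxes = int(len(flat_anchors) / 2)     # 3
filters = n_boxes * (4 + 1 + n_classes)  # 18
```

If that reading is right, supplying the anchors in whatever format the checked-out branch expects (rather than the nested list above) should avoid the `filters=0` error.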
Hi @AIWintermuteAI, I see your closed issue about implementing YOLOv3 on the Maix board -> #63
Since there is an option to use YOLOv3 in your README, can you provide the pre-compiled firmware that was deleted before?
Pre-compiled firmware
Hi @muaz919! Please create a separate issue, as this is unrelated.