Hi,
I can now train a YOLOv6 model on a custom dataset with a modified version of your config (shown below). However, it only creates an archive with the .onnx file. How do I also make it create an archive with the RVC2 blob?
model:
  name: barcode_detection_medium_512x384
  predefined_model:
    name: DetectionModel
    params:
      variant: medium

loader:
  params:
    dataset_name: dataset
    dataset_dir: dataset

trainer:
  precision: "16-mixed"
  preprocessing:
    train_image_size: [512, 384]
    keep_aspect_ratio: true
    normalize:
      active: true
      params:
        mean: [0., 0., 0.]
        std: [1, 1, 1]
    augmentations:
      - name: Rotate
        params:
          limit: 10
          p: 0.5
          border_mode: 0
          value: [0, 0, 0]
      - name: Rotate
        params:
          limit: 90
          p: 0.1
          border_mode: 0
          value: [0, 0, 0]
      - name: Affine
        params:
          scale: 1.0
          translate_percent: 0.0
          rotate: 0
          shear: 5
          interpolation: 1
          mask_interpolation: 0
          cval: 0
          cval_mask: 0
          mode: 0
          fit_output: false
          keep_ratio: false
          rotate_method: largest_box
          always_apply: false
          p: 0.5
      - name: RandomBrightnessContrast
        params:
          brightness_limit: 0.2
          contrast_limit: 0.2
          brightness_by_max: false
          p: 0.5
      - name: Defocus
        params:
          p: 0.1

  batch_size: 32
  epochs: &epochs 300
  n_workers: 8
  validation_interval: 10
  n_log_images: 25
  gradient_clip_val: 10
  # resume_training: true # To continue from epoch 569

  callbacks:
    - name: EMACallback
      params:
        decay: 0.9999
        use_dynamic_decay: True
        decay_tau: 2000
    - name: ExportOnTrainEnd
    - name: UploadCheckpoint
    - name: TestOnTrainEnd
    - name: GPUStatsMonitor
    - name: ArchiveOnTrainEnd
    - name: GradientAccumulationScheduler
      params:
        scheduling: # warmup phase is 3 epochs
          0: 1
          1: 1
          2: 2 # For best results, always accumulate gradients to effectively use 64 batch size
    - name: MetadataLogger
      params:
        hyperparams: ["trainer.epochs", "trainer.batch_size", "model.name"]

exporter:
  blobconverter:
    active: true
    shaves: 6
    version: "2022.1"