model:
  base_learning_rate: 4.5e-06
  target: taming.models.cond_transformer.Net2NetTransformer
  params:
    cond_stage_key: objects_bbox
    transformer_config:
      target: taming.modules.transformer.mingpt.GPT
      params:
        vocab_size: 8192
        block_size: 348 # = 256 + 92 = dim(vqgan_latent_space, 16x16) + dim(conditional_builder.embedding_dim)
        n_layer: 36
        n_head: 16
        n_embd: 1536
        embd_pdrop: 0.1
        resid_pdrop: 0.1
        attn_pdrop: 0.1
    first_stage_config:
      target: taming.models.vqgan.VQModel
      params:
        ckpt_path: /path/to/coco_oi_epoch12.ckpt # https://heibox.uni-heidelberg.de/f/461d9a9f4fcf48ab84f4/
        embed_dim: 256
        n_embed: 8192
        ddconfig:
          double_z: false
          z_channels: 256
          resolution: 256
          in_channels: 3
          out_ch: 3
          ch: 128
          ch_mult:
          - 1
          - 1
          - 2
          - 2
          - 4
          num_res_blocks: 2
          attn_resolutions:
          - 16
          dropout: 0.0
        lossconfig:
          target: taming.modules.losses.DummyLoss
    cond_stage_config:
      target: taming.models.dummy_cond_stage.DummyCondStage
      params:
        conditional_key: objects_bbox

data:
  target: main.DataModuleFromConfig
  params:
    batch_size: 6
    train:
      target: taming.data.annotated_objects_open_images.AnnotatedObjectsOpenImages
      params:
        data_path: data/open_images_annotations_100 # substitute with path to full dataset
        split: train
        keys: [image, objects_bbox, file_name, annotations]
        no_tokens: 8192
        target_image_size: 256
        category_allow_list_target: taming.data.open_images_helper.top_300_classes_plus_coco_compatibility
        category_mapping_target: taming.data.open_images_helper.open_images_unify_categories_for_coco
        min_object_area: 0.0001
        min_objects_per_image: 2
        max_objects_per_image: 30
        crop_method: random-2d
        random_flip: true
        use_group_parameter: true
        use_additional_parameters: true
        encode_crop: true
    validation:
      target: taming.data.annotated_objects_open_images.AnnotatedObjectsOpenImages
      params:
        data_path: data/open_images_annotations_100 # substitute with path to full dataset
        split: validation
        keys: [image, objects_bbox, file_name, annotations]
        no_tokens: 8192
        target_image_size: 256
        category_allow_list_target: taming.data.open_images_helper.top_300_classes_plus_coco_compatibility
        category_mapping_target: taming.data.open_images_helper.open_images_unify_categories_for_coco
        min_object_area: 0.0001
        min_objects_per_image: 2
        max_objects_per_image: 30
        crop_method: center
        random_flip: false
        use_group_parameter: true
        use_additional_parameters: true
        encode_crop: true
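For reference, the `block_size` comment above encodes the transformer's token budget: 256 codebook indices from the 16x16 VQGAN latent grid plus 92 tokens emitted by the bounding-box conditional builder, while `vocab_size` matches the first stage's codebook size (`n_embed`). Below is a minimal sketch, not part of the repository, of how those relationships could be checked after loading the file with OmegaConf (the loader used by the repo's `main.py`); the config path is an assumption.

```python
# Hypothetical sanity check for the config above; the filename is assumed.
from omegaconf import OmegaConf

cfg = OmegaConf.load("configs/open_images_scene_images_transformer.yaml")  # assumed path

gpt = cfg.model.params.transformer_config.params
latent_tokens = 16 * 16        # 16x16 VQGAN latent grid -> 256 code indices per image
conditioning_tokens = 92       # tokens produced by the bbox conditional builder
assert gpt.block_size == latent_tokens + conditioning_tokens               # 348
assert gpt.vocab_size == cfg.model.params.first_stage_config.params.n_embed  # 8192 codes
```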