input_process.poseEmbedding.weight: torch.Size([512, 263])
input_process.poseEmbedding.bias: torch.Size([512])
sequence_pos_encoder.pe: torch.Size([5000, 1, 512])
seqTransEncoder.layers.0 through layers.7 (each of the eight encoder layers has identical parameter shapes):
  self_attn.in_proj_weight: torch.Size([1536, 512])
  self_attn.in_proj_bias: torch.Size([1536])
  self_attn.out_proj.weight: torch.Size([512, 512])
  self_attn.out_proj.bias: torch.Size([512])
  linear1.weight: torch.Size([1024, 512])
  linear1.bias: torch.Size([1024])
  linear2.weight: torch.Size([512, 1024])
  linear2.bias: torch.Size([512])
  norm1.weight: torch.Size([512])
  norm1.bias: torch.Size([512])
  norm2.weight: torch.Size([512])
  norm2.bias: torch.Size([512])
embed_timestep.sequence_pos_encoder.pe: torch.Size([5000, 1, 512])
embed_timestep.time_embed.0.weight: torch.Size([512, 512])
embed_timestep.time_embed.0.bias: torch.Size([512])
embed_timestep.time_embed.2.weight: torch.Size([512, 512])
embed_timestep.time_embed.2.bias: torch.Size([512])
embed_text.weight: torch.Size([512, 512])
embed_text.bias: torch.Size([512])
output_process.poseFinal.weight: torch.Size([263, 512])
output_process.poseFinal.bias: torch.Size([263])
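This listing appears to come from iterating over the checkpoint's state_dict (buffers such as sequence_pos_encoder.pe are present, so it is state_dict rather than named_parameters). The shapes imply a 263-dimensional pose feature vector projected into a 512-dimensional latent space, an eight-layer Transformer encoder with a feed-forward width of 1024, a sinusoidal positional-encoding buffer of maximum length 5000, a two-layer MLP timestep embedder, a 512-to-512 text-conditioning projection, and a final 512-to-263 output projection. The sketch below is a hypothetical PyTorch module whose state_dict reproduces these keys and shapes; the number of attention heads, the activation inside time_embed, and any class or attribute names not visible in the keys above are assumptions, not taken from the checkpoint.

```python
import math
import torch
import torch.nn as nn


class PositionalEncoding(nn.Module):
    """Registers a sinusoidal table as a buffer named `pe` of shape [max_len, 1, d_model]."""
    def __init__(self, d_model=512, max_len=5000):
        super().__init__()
        position = torch.arange(max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        self.register_buffer("pe", pe.unsqueeze(1))  # sequence_pos_encoder.pe: [5000, 1, 512]


class InputProcess(nn.Module):
    """Projects the 263-dim pose feature vector into the 512-dim latent space."""
    def __init__(self, nfeats=263, d_model=512):
        super().__init__()
        self.poseEmbedding = nn.Linear(nfeats, d_model)  # weight: [512, 263]


class OutputProcess(nn.Module):
    """Projects latent vectors back to 263-dim pose features."""
    def __init__(self, nfeats=263, d_model=512):
        super().__init__()
        self.poseFinal = nn.Linear(d_model, nfeats)  # weight: [263, 512]


class TimestepEmbedder(nn.Module):
    """Embeds the diffusion timestep via the shared positional table and a small MLP."""
    def __init__(self, d_model, sequence_pos_encoder):
        super().__init__()
        # Sharing the module makes its buffer reappear as embed_timestep.sequence_pos_encoder.pe
        self.sequence_pos_encoder = sequence_pos_encoder
        self.time_embed = nn.Sequential(
            nn.Linear(d_model, d_model),  # time_embed.0: [512, 512]
            nn.SiLU(),                    # activation assumed; it has no parameters and is not visible in the dump
            nn.Linear(d_model, d_model),  # time_embed.2: [512, 512]
        )


class MotionDenoiser(nn.Module):
    """Hypothetical wrapper; attribute names are chosen to reproduce the listed state_dict keys."""
    def __init__(self, nfeats=263, d_model=512, nhead=4, dim_feedforward=1024, num_layers=8):
        super().__init__()
        self.input_process = InputProcess(nfeats, d_model)
        self.sequence_pos_encoder = PositionalEncoding(d_model)
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward)
        self.seqTransEncoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.embed_timestep = TimestepEmbedder(d_model, self.sequence_pos_encoder)
        self.embed_text = nn.Linear(d_model, d_model)    # text-conditioning projection: [512, 512]
        self.output_process = OutputProcess(nfeats, d_model)


if __name__ == "__main__":
    model = MotionDenoiser()
    for name, tensor in model.state_dict().items():
        print(f"{name}: {tensor.shape}")
```

Running the script prints the same key names and shapes as the listing above, which is a quick way to check that a re-implementation along these lines could load the checkpoint with load_state_dict.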