You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I have trained PromptPAR on my own dataset, but when I try to get predictions on images it gives the same output / prediction every time.
What I observed is that the checkpoint dictionary is different from the weights I get after training. In the training file you reference eval.py, but this file is not present in the repository. Please help in this regard. I am also sharing my weights file.
`import torch
import numpy as np
import os
import pprint
from collections import OrderedDict, defaultdict
import sys
import numpy as np
import torch
from torch.utils.data import DataLoader
import time
from torch import nn,optim
from batch_engine import valid_trainer
from config import argument_parser
from dataset.AttrDataset import MultiModalAttrDataset, get_transform
from loss.CE_loss import *
from models_2.base_block import *
from tools.function import get_pedestrian_metrics,get_signle_metrics
from tools.utils import time_str, save_ckpt, ReDirectSTD, set_seed, select_gpus
from solver import make_optimizer
from solver.scheduler_factory import create_scheduler,make_scheduler
from clip_2 import clip
from clip_2.model import *
# Prompt templates for each pedestrian attribute, in the same order the
# classifier head was trained on.
custom_attributes = [
    'A pedestrian with long hair','A pedestrian with hat','A pedestrian wearing kamiz','A pedestrian wearing pant','A pedestrian with glasses',
    'A pedestrian is male','A pedestrian with a backpack','A pedestrian with a hand bag','A pedestrian with a Fat body','A pedestrian is a kid',
    'A pedestrian is a teenager','A pedestrian is a adult','A pedestrian is a old', 'A pedestrian with black upper body',
    'A pedestrian with white upper body','A pedestrian with red upper body','A pedestrian with purple upper body','A pedestrian with yellow upper body',
    'A pedestrian with gray upper body','A pedestrian with blue upper body','A pedestrian with green upper body',
    'A pedestrian with black lower body','A pedestrian with white lower body','A pedestrian with pink lower body','A pedestrian with purple lower body',
    'A pedestrian with yellow lower body','A pedestrian with gray lower body','A pedestrian with blue lower body','A pedestrian with green lower body',
    'A pedestrian with brown lower body']
# Derive the attribute count from the list itself instead of hard-coding 30,
# so the two can never drift apart when attributes are added or removed.
num = len(custom_attributes)
# --- Checkpoint loading ---
# NOTE(review): machine-specific path — consider exposing it as a CLI argument.
CKPT_PATH = "/UNITY-NFS/Data_B/OpenPAR/PromptPAR/logs/custom/2024-09-11_12_45_59/epoch43.pth"
# map_location='cpu' lets the checkpoint load on CPU-only hosts; the model is
# moved to CUDA below when available.
checkpoint = torch.load(CKPT_PATH, map_location='cpu')

# Rebuild the CLIP backbone from its saved state, then the attribute classifier.
clip_model = build_model(checkpoint['clip_model'])
model = TransformerClassifier(clip_model, num, custom_attributes)

# BUG FIX: a stray pasted path line here was a SyntaxError, and the no-op
# `checkpoint.keys()` call was removed.  strict=False silently ignores key
# mismatches — a common cause of "same prediction for every image" because the
# trained weights are never actually loaded — so report any mismatch loudly.
missing, unexpected = model.load_state_dict(checkpoint['model_state_dict'], strict=False)
if missing or unexpected:
    print(f"WARNING: state_dict mismatch — missing={missing}, unexpected={unexpected}")

if torch.cuda.is_available():
    model = model.cuda()
    clip_model = clip_model.cuda()
You can remove `strict=False` in your evaluation code and in `clip/model.py`'s `build_model` when loading the pre-trained weights. Then check that the parameters updated during training were loaded correctly. If you can't confirm that they are loading correctly, please post the logs.
I have trained PromptPAR on my own dataset, but when I try to get predictions on images it gives the same output / prediction every time.
What I observed is that the checkpoint dictionary is different from the weights I get after training. In the training file you reference eval.py, but this file is not present in the repository. Please help in this regard. I am also sharing my weights file.
`import torch
import numpy as np
import os
import pprint
from collections import OrderedDict, defaultdict
import sys
import numpy as np
import torch
from torch.utils.data import DataLoader
import time
from torch import nn,optim
from batch_engine import valid_trainer
from config import argument_parser
from dataset.AttrDataset import MultiModalAttrDataset, get_transform
from loss.CE_loss import *
from models_2.base_block import *
from tools.function import get_pedestrian_metrics,get_signle_metrics
from tools.utils import time_str, save_ckpt, ReDirectSTD, set_seed, select_gpus
from solver import make_optimizer
from solver.scheduler_factory import create_scheduler,make_scheduler
from clip_2 import clip
from clip_2.model import *
# Prompt templates for each pedestrian attribute, in the same order the
# classifier head was trained on.
custom_attributes = [
    'A pedestrian with long hair','A pedestrian with hat','A pedestrian wearing kamiz','A pedestrian wearing pant','A pedestrian with glasses',
    'A pedestrian is male','A pedestrian with a backpack','A pedestrian with a hand bag','A pedestrian with a Fat body','A pedestrian is a kid',
    'A pedestrian is a teenager','A pedestrian is a adult','A pedestrian is a old', 'A pedestrian with black upper body',
    'A pedestrian with white upper body','A pedestrian with red upper body','A pedestrian with purple upper body','A pedestrian with yellow upper body',
    'A pedestrian with gray upper body','A pedestrian with blue upper body','A pedestrian with green upper body',
    'A pedestrian with black lower body','A pedestrian with white lower body','A pedestrian with pink lower body','A pedestrian with purple lower body',
    'A pedestrian with yellow lower body','A pedestrian with gray lower body','A pedestrian with blue lower body','A pedestrian with green lower body',
    'A pedestrian with brown lower body']
# Derive the attribute count from the list itself instead of hard-coding 30,
# so the two can never drift apart when attributes are added or removed.
num = len(custom_attributes)
# --- Checkpoint loading ---
# NOTE(review): machine-specific path — consider exposing it as a CLI argument.
CKPT_PATH = "/UNITY-NFS/Data_B/OpenPAR/PromptPAR/logs/custom/2024-09-11_12_45_59/epoch43.pth"
# map_location='cpu' lets the checkpoint load on CPU-only hosts; the model is
# moved to CUDA below when available.
checkpoint = torch.load(CKPT_PATH, map_location='cpu')

# Rebuild the CLIP backbone from its saved state, then the attribute classifier.
clip_model = build_model(checkpoint['clip_model'])
model = TransformerClassifier(clip_model, num, custom_attributes)

# BUG FIX: a stray pasted path line here was a SyntaxError, and the no-op
# `checkpoint.keys()` call was removed.  strict=False silently ignores key
# mismatches — a common cause of "same prediction for every image" because the
# trained weights are never actually loaded — so report any mismatch loudly.
missing, unexpected = model.load_state_dict(checkpoint['model_state_dict'], strict=False)
if missing or unexpected:
    print(f"WARNING: state_dict mismatch — missing={missing}, unexpected={unexpected}")

if torch.cuda.is_available():
    model = model.cuda()
    clip_model = clip_model.cuda()
import cv2

# cv2.imread returns None (no exception) on a missing/unreadable file — fail
# fast here instead of crashing later inside the transform pipeline.
img = cv2.imread("images/20240807114139768329_t.jpg")
if img is None:
    raise FileNotFoundError("images/20240807114139768329_t.jpg could not be read")
# BUG FIX: OpenCV decodes images in BGR channel order, but the ImageNet
# normalization constants below assume RGB — convert before preprocessing,
# otherwise the red/blue statistics are swapped and predictions degrade.
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

from torchvision import transforms
preprocess = transforms.Compose([
    transforms.ToPILImage(),            # numpy HWC uint8 -> PIL image
    transforms.Resize((224, 224)),      # model input resolution
    transforms.ToTensor(),              # PIL -> float CHW tensor in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),  # ImageNet statistics
])
def valid_trainer_single(model, clip_model, img):
    """Run a single image through the classifier and return attribute probabilities.

    BUG FIX: the original body only switched to eval mode and built an empty
    list, with no forward pass and no return statement — so the module-level
    `pred` was always the implicit None, i.e. "the same prediction every time".

    Parameters:
        model: the TransformerClassifier attribute head.
        clip_model: the CLIP backbone the head conditions on.
        img: an RGB HxWxC uint8 numpy image (output of the preprocessing above).

    Returns:
        A numpy array of per-attribute probabilities.
    """
    model.eval()
    clip_model.eval()
    with torch.no_grad():
        # Preprocess to a 1xCxHxW batch and move it to the model's device.
        batch = preprocess(img).unsqueeze(0)
        if next(model.parameters()).is_cuda:
            batch = batch.cuda()
        # NOTE(review): call signature assumed from the training loop
        # (`model(imgs, clip_model)`); confirm against batch_engine.valid_trainer.
        logits = model(batch, clip_model)
        if isinstance(logits, (tuple, list)):
            logits = logits[0]
        # Attribute recognition is multi-label: independent sigmoids per attribute.
        probs = torch.sigmoid(logits)
    return probs.cpu().numpy()

pred = valid_trainer_single(model, clip_model, img)
`
The text was updated successfully, but these errors were encountered: