Merge pull request #30 from visionjo/devel
visionjo authored May 29, 2020
2 parents 29dba04 + 241da5b commit 07a16d0
Showing 15 changed files with 2,330 additions and 48 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -1,3 +1,5 @@
3rdparty/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
360 changes: 314 additions & 46 deletions README.md

Large diffs are not rendered by default.

Binary file added docs/task-1-counts.png
Binary file added docs/task-2-counts.png
Binary file added docs/task-3-counts.png
121 changes: 121 additions & 0 deletions evaluation/evaluate.py
@@ -0,0 +1,121 @@
# -*- coding: utf-8 -*-

from __future__ import division, print_function

import argparse
import json
import os
import pickle

import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import Dataset
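
## A representative invocation (paths default to the argparse options below):
# python evaluate.py --gpu_ids 0 --save_name Rank-k_mAP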

######################################################################
# Options
# --------
if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Testing')
    parser.add_argument('--gpu_ids', default='0', type=str, help='gpu_ids: e.g. 0 0,1')
    parser.add_argument('--test_feature_dir', default='/media/yuyin/10THD1/Kinship/fiw-mm/data/FIDs-MM-features', type=str, help='features of test data')
    parser.add_argument('--test_list_p', default='/media/yuyin/10THD1/Kinship/fiw-mm/data/lists/test/probes.json', type=str, help='test list probe')
    parser.add_argument('--test_list_g', default='/media/yuyin/10THD1/Kinship/fiw-mm/data/lists/test/gallery.json', type=str, help='test list gallery')
    parser.add_argument('--batchsize', default=256, type=int, help='batchsize')
    parser.add_argument('--save_name', default='Rank-k_mAP', type=str, help='file name for saving results')

    opt = parser.parse_args()

    str_ids = opt.gpu_ids.split(',')

    gpu_ids = []
    for str_id in str_ids:
        gpu_id = int(str_id)
        if gpu_id >= 0:
            gpu_ids.append(gpu_id)

    # set gpu ids
    if len(gpu_ids) > 0:
        torch.cuda.set_device(gpu_ids[0])
        cudnn.benchmark = True
    use_gpu = torch.cuda.is_available()


######################################################################
# Data
# ---------
class Rfiw2020TestSet(Dataset):
    def __init__(self, x):
        if x == 'gallery':
            with open(opt.test_list_g) as file:
                self.imgs = json.load(file)
        else:
            self.imgs = []
            with open(opt.test_list_p) as file:
                probes = json.load(file)
            for _, family_member_ind in probes.items():
                self.imgs.append(family_member_ind)

    def __len__(self):
        return len(self.imgs)


######################################################################
# Load feature
# ---------
def get_gallery_feature_and_id(img_path):
    feat_path = "/media/yuyin/10THD1/Kinship/fiw-mm/data/lists/test/gallery_features.npy"
    # NOTE: .npy is NumPy's binary format, so np.load is assumed here; if the
    # file was actually written with np.savetxt, revert to np.loadtxt.
    feat_matrix = np.load(feat_path)

    assert feat_matrix.shape[0] == len(img_path)
    labels = np.zeros((feat_matrix.shape[0], 1))  # size (21951, 1)
    for i, path in enumerate(img_path):
        labels[i] = int(path.split('/')[0].split('F')[1])

    return feat_matrix, labels
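
# NOTE: labels are parsed from FIW-style paths, e.g. a hypothetical
# 'F0123/MID4/faces/face0.jpg' -> split('/')[0] = 'F0123' -> family ID 123.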

def get_probe_feature_and_id(img_path):
    # size of probe img_path: 190
    labels = []
    features = []
    for path in img_path:
        label = int(path.split('/')[0].split('F')[1])
        feat_path_per_probe = os.path.join(opt.test_feature_dir, path, "encodings.pkl")
        with open(feat_path_per_probe, 'rb') as f:
            feat = pickle.load(f)
        for _, feats_per_probe in feat.items():
            features.append(feats_per_probe)
            labels.append(label)

    return np.asarray(features), np.asarray(labels).reshape(-1, 1)

######################################################################
# Testing
# ---------
# Load data
image_datasets = {x: Rfiw2020TestSet(x) for x in ['gallery', 'query']}

print('-------test-----------')
# Load features
gallery_feature, gallery_label = get_gallery_feature_and_id(
    image_datasets['gallery'].imgs)
print("gallery size:", gallery_feature.shape, gallery_label.shape)
query_feature, query_label = get_probe_feature_and_id(
    image_datasets['query'].imgs)
# (4540, 512) (4540,)
print("query size:", query_feature.shape, query_label.shape)

# Save result
print('-->Save features to gallery_probe_features.npy')
result = {'gallery_f': gallery_feature, 'gallery_label': gallery_label,
          'query_f': query_feature, 'query_label': query_label}

np.save("gallery_probe_features.npy", result)
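# NOTE: np.save pickles this dict inside a 0-d object array; np.load then needs
# allow_pickle=True (the default is False since NumPy 1.16.3) to read it back.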


# Run utils.py to compute Rank-k and mAP
result = './%s_result.txt' % opt.save_name
os.system('python utils.py | tee -a %s' % result)
109 changes: 109 additions & 0 deletions evaluation/evaluate_template_based.py
@@ -0,0 +1,109 @@
# -*- coding: utf-8 -*-

from __future__ import division, print_function

import argparse
import json
import os

import numpy as np
import torch
import torch.backends.cudnn as cudnn

## Run the code with:
# python evaluate_template_based.py --fuse median-median --test_list_g gallery_features.json --test_list_p probe_features.json
# python evaluate_template_based.py --fuse mean-median
# python evaluate_template_based.py --fuse none-median


def fusion(all_features_per_template, fusion_method='median'):
    all_features_per_template = np.asarray(all_features_per_template)
    if fusion_method == 'median':
        return np.median(all_features_per_template, 0)
    elif fusion_method == 'mean':
        return np.mean(all_features_per_template, 0)
    else:
        # print("No correct fusion method (i.e., mean/median) found.")
        return all_features_per_template
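
# e.g., a template of 3 face encodings (512-d, per the shapes in evaluate.py)
# stacks to (3, 512); 'median' or 'mean' fuses along axis 0 into a single
# (512,) vector, while any other value (e.g., 'none') returns the stack as-is.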

def Rfiw2020TestSet(x, fusion_method='median-median'):
    fuse_gallery, fuse_probe = fusion_method.split("-")
    labels = []
    features = []
    assert x in ['gallery', 'query']
    if x == 'gallery':
        fuse = fuse_gallery
        feat_list = opt.test_list_g
    else:
        fuse = fuse_probe
        feat_list = opt.test_list_p

    with open(feat_list) as file:
        data = json.load(file)
    for family_member_ind, all_features_per_template in data.items():
        label = int(family_member_ind.split('/')[0].split('F')[1])
        if fuse != "none":
            feats = fusion(all_features_per_template, fuse)
            features.append(feats)
            labels.append(label)
        else:
            for i in range(len(all_features_per_template)):
                features.append(all_features_per_template[i])
                labels.append(label)

    return np.asarray(features), np.asarray(labels)
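
# NOTE: with fuse 'none' every raw encoding becomes its own row, so the feature
# matrix holds one row per encoding instead of one fused row per template.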


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Testing')
    parser.add_argument('--gpu_ids', default='0', type=str, help='gpu_ids: e.g. 0 0,1')
    parser.add_argument('--test_list_p', default='/media/yuyin/10THD1/Kinship/fiw-mm/data/lists/test/probe_features.json', type=str, help='test list probe')
    parser.add_argument('--test_list_g', default='/media/yuyin/10THD1/Kinship/fiw-mm/data/lists/test/gallery_features.json', type=str, help='test list gallery')
    parser.add_argument('--save_name', default='Rank-k_mAP', type=str, help='file name for saving results')
    parser.add_argument('--fuse', default='median-median', type=str, help='fuse method (median/mean/none) for gallery-query')

    opt = parser.parse_args()

    str_ids = opt.gpu_ids.split(',')

    gpu_ids = []
    for str_id in str_ids:
        gpu_id = int(str_id)
        if gpu_id >= 0:
            gpu_ids.append(gpu_id)

    # set gpu ids
    if len(gpu_ids) > 0:
        torch.cuda.set_device(gpu_ids[0])
        cudnn.benchmark = True
    use_gpu = torch.cuda.is_available()


######################################################################
# Testing
# ---------
print('-------test-----------')

## Load features
gallery_feature, gallery_label = Rfiw2020TestSet('gallery', opt.fuse)
print("gallery size:", gallery_feature.shape, gallery_label.shape)

query_feature, query_label = Rfiw2020TestSet('query', opt.fuse)
print("query size:", query_feature.shape, query_label.shape)

## Save result
print('-->Save features to gallery_probe_features.npy')
result = {'gallery_f': gallery_feature, 'gallery_label': gallery_label,
          'query_f': query_feature, 'query_label': query_label}
np.save("gallery_probe_features.npy", result)


## Run utils.py to compute Rank-k and mAP
result = './%s_result.txt' % opt.save_name
os.system('python utils.py | tee -a %s' % result)
118 changes: 118 additions & 0 deletions evaluation/utils.py
@@ -0,0 +1,118 @@
import matplotlib.pyplot as plt
import numpy as np
import torch
from sklearn.metrics.pairwise import cosine_similarity

#######################################################################
# Evaluate
def compute_scores(feature_seta, feature_setb):
    query = feature_seta.view(-1, 1)

    x1 = feature_setb.cpu().numpy()
    x2 = query.cpu().numpy().reshape(1, -1)
    return cosine_similarity(x1, x2, dense_output=True)
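
# Shape note: feature_setb (gallery) is (N, d) and the probe is reshaped to
# (1, d), so cosine_similarity returns one (N, 1) column of similarity scores.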


def make_prediction(scores):
    # predict index
    index = np.argsort(scores.squeeze(1))  # from small to large
    return index[::-1]


def evaluate(features_probe, labels_probes, features_gallery, labels_gallery):
    """
    End-to-end evaluation of one probe against the whole gallery: scores the
    probe, ranks the gallery, and computes CMC / AP for that probe.
    :param features_probe: feature vector of a single probe
    :param labels_probes: family label of that probe
    :param features_gallery: (N, d) gallery feature matrix
    :param labels_gallery: (N, 1) gallery family labels
    :return: similarity scores, ranked gallery indices, and an (ap, cmc) pair
    """
    scores = compute_scores(features_probe, features_gallery)

    ranked_list_predicted = make_prediction(scores)

    # flatten the labels before comparing so argwhere returns gallery row
    # indices only (argwhere on an (N, 1) mask would also emit column zeros)
    list_true_relatives = np.argwhere(labels_gallery.squeeze() == labels_probes.squeeze())

    cmc_tmp = compute_mAP(ranked_list_predicted, list_true_relatives)

    return scores, ranked_list_predicted, cmc_tmp


def compute_mAP(predicted_indices, true_indices):
    ap = 0
    cmc = torch.IntTensor(len(predicted_indices)).zero_()
    if not true_indices.size:  # if empty
        cmc[0] = -1
        return ap, cmc

    # find good_index index
    ngood = len(true_indices)
    mask = np.in1d(predicted_indices, true_indices)
    rows_good = np.argwhere(mask)
    rows_good = rows_good.flatten()

    cmc[rows_good[0]:] = 1
    for i in range(ngood):
        d_recall = 1.0 / ngood
        precision = (i + 1) * 1.0 / (rows_good[i] + 1)
        if rows_good[i] != 0:
            old_precision = i * 1.0 / rows_good[i]
        else:
            old_precision = 1.0
        ap = ap + d_recall * (old_precision + precision) / 2

    return ap, cmc
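
# Worked example (hypothetical): with ngood = 2 true relatives ranked 1st and
# 3rd, rows_good = [0, 2], so AP = 0.5*(1.0 + 1.0)/2 + 0.5*(1/2 + 2/3)/2
# = 0.5 + 7/24 ~= 0.79; cmc[0:] = 1 because the first hit is at rank 1.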


if __name__ == '__main__':
    ######################################################################
    # allow_pickle=True is needed to load the pickled dict saved by evaluate.py
    result = np.load("gallery_probe_features.npy", allow_pickle=True)

    gallery_feature = torch.FloatTensor(result.item().get('gallery_f'))
    gallery_label = result.item().get('gallery_label')
    print("gallery size:", gallery_feature.size(), gallery_label.shape)

    query_feature = torch.FloatTensor(result.item().get('query_f'))
    query_label = result.item().get('query_label')
    print("query size:", query_feature.size(), query_label.shape)

    query_feature = query_feature.cuda().squeeze(1)
    gallery_feature = gallery_feature.cuda().squeeze(1)

    ## query-gallery
    CMC = torch.IntTensor(gallery_label.shape[0]).zero_()
    ap = 0.0
    all_scores = []
    all_predicts = []
    for i in range(query_label.shape[0]):
        scores, predicts, (ap_tmp, CMC_tmp) = evaluate(query_feature[i], query_label[i],
                                                       gallery_feature, gallery_label)
        all_scores.append(scores.squeeze())
        all_predicts.append(predicts)
        if CMC_tmp[0] == -1:
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp

    CMC = CMC.float()
    CMC = CMC / query_label.shape[0]  # average CMC
    print('Rank@1:%f Rank@5:%f Rank@10:%f' % (CMC[0], CMC[4], CMC[9]))
    print('Rank@10:%f Rank@20:%f Rank@50:%f' % (CMC[9], CMC[19], CMC[49]))
    print('mAP:%f' % (ap / query_label.shape[0]))
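
    # CMC[k-1] is the Rank-k accuracy: the fraction of probes with at least
    # one true relative among the top-k ranked gallery entries.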

    # save all_scores to npy
    predict_result = {'score': np.asarray(all_scores), 'predict': np.asarray(all_predicts)}
    np.save("predict_result.npy", predict_result)

    CMC = CMC.numpy()
    fig, ax = plt.subplots()
    plt.plot(CMC)
    ax.set(xscale="log")
    plt.xlim(0, 1000)
    plt.show()
    fig.savefig('CMC_result.png')
