DSHSD.py
from utils.tools import *
from network import *
import os
import torch
import torch.optim as optim
import time
import numpy as np

torch.multiprocessing.set_sharing_strategy('file_system')

# DSHSD (IEEE Access 2019)
# paper [Deep Supervised Hashing Based on Stable Distribution](https://ieeexplore.ieee.org/document/8648432/)
# [DSHSD] epoch:70, bit:48, dataset:cifar10-1, MAP:0.809, Best MAP: 0.809
# [DSHSD] epoch:250, bit:48, dataset:nuswide_21, MAP:0.809, Best MAP: 0.815
# [DSHSD] epoch:135, bit:48, dataset:imagenet, MAP:0.647, Best MAP: 0.647
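
# Summary of the objective optimized below, read off the DSHSDLoss code
# (equation numbers follow the comments in forward()):
#   L = Lc + alpha * Ld
#   Ld = mean_{i,j} [ (1 - s_ij)/2 * ||u_i - u_j||^2
#                     + s_ij/2 * max(m - ||u_i - u_j||^2, 0) ]
#   where s_ij = 1 iff samples i and j share no label, m = 2 * bit, and
#   u = tanh(net output). Lc is formula 7 (softmax cross-entropy, single-label)
#   or formula 8 (sigmoid cross-entropy, multi-label) on a linear classifier over u.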
def get_config():
    config = {
        "alpha": 0.05,
        "optimizer": {"type": optim.Adam, "optim_params": {"lr": 1e-5, "betas": (0.9, 0.999)}},
        "info": "[DSHSD]",
        "resize_size": 256,
        "crop_size": 224,
        "batch_size": 64,
        "net": AlexNet,
        # "net": ResNet,
        # "dataset": "cifar10-1",
        "dataset": "imagenet",
        # "dataset": "nuswide_21",
        "epoch": 250,
        "test_map": 5,
        # "device": torch.device("cpu"),
        "device": torch.device("cuda:1"),
        "bit_list": [48],
    }
    config = config_dataset(config)
    return config
class DSHSDLoss(torch.nn.Module):
    def __init__(self, config, bit):
        super(DSHSDLoss, self).__init__()
        self.m = 2 * bit  # margin for dissimilar pairs
        self.fc = torch.nn.Linear(bit, config["n_class"], bias=False).to(config["device"])

    def forward(self, u, y, ind, config):
        u = torch.tanh(u)
        # pairwise squared Euclidean distances between relaxed codes
        dist = (u.unsqueeze(1) - u.unsqueeze(0)).pow(2).sum(dim=2)
        # s_ij = 1 if samples i and j share no label (dissimilar pair)
        s = (y @ y.t() == 0).float()

        Ld = (1 - s) / 2 * dist + s / 2 * (self.m - dist).clamp(min=0)
        Ld = config["alpha"] * Ld.mean()

        o = self.fc(u)
        if "nuswide" in config["dataset"]:
            # formula 8, multi-label classification loss (sigmoid cross-entropy)
            Lc = (o - y * o + ((1 + (-o).exp()).log())).sum(dim=1).mean()
        else:
            # formula 7, single-label classification loss (softmax cross-entropy)
            Lc = (-o.softmax(dim=1).log() * y).sum(dim=1).mean()

        return Lc + Ld
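
# A minimal smoke test for DSHSDLoss (a sketch, not part of the training flow;
# the config values are illustrative, and only the keys actually read by
# DSHSDLoss are supplied):
#
#   cfg = {"n_class": 10, "device": torch.device("cpu"),
#          "alpha": 0.05, "dataset": "cifar10-1"}
#   criterion = DSHSDLoss(cfg, bit=48)
#   u = torch.randn(8, 48)                         # raw network outputs
#   y = torch.eye(10)[torch.randint(0, 10, (8,))]  # one-hot labels
#   loss = criterion(u, y, None, cfg)              # ind is unused by this loss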
def train_val(config, bit):
    device = config["device"]
    train_loader, test_loader, dataset_loader, num_train, num_test, num_dataset = get_data(config)
    config["num_train"] = num_train
    net = config["net"](bit).to(device)

    optimizer = config["optimizer"]["type"](net.parameters(), **(config["optimizer"]["optim_params"]))
    criterion = DSHSDLoss(config, bit)

    Best_mAP = 0
    for epoch in range(config["epoch"]):
        current_time = time.strftime('%H:%M:%S', time.localtime(time.time()))
        print("%s[%2d/%2d][%s] bit:%d, dataset:%s, training...." % (
            config["info"], epoch + 1, config["epoch"], current_time, bit, config["dataset"]), end="")

        net.train()
        train_loss = 0
        for image, label, ind in train_loader:
            image = image.to(device)
            label = label.to(device)

            optimizer.zero_grad()
            u = net(image)
            loss = criterion(u, label.float(), ind, config)
            train_loss += loss.item()

            loss.backward()
            optimizer.step()

        train_loss = train_loss / len(train_loader)
        print("\b\b\b\b\b\b\b loss:%.3f" % (train_loss))

        if (epoch + 1) % config["test_map"] == 0:
            Best_mAP = validate(config, Best_mAP, test_loader, dataset_loader, net, bit, epoch, num_dataset)
if __name__ == "__main__":
    config = get_config()
    print(config)
    for bit in config["bit_list"]:
        train_val(config, bit)