@Erotemic
Created April 8, 2018 17:26
RegionLoss Profile: line_profiler comparison of netharn's vectorized YOLOv2 RegionLoss against the original lightnet implementation
Timer unit: 1e-06 s
Pystone time: 0.026254 s
File: /home/joncrall/code/netharn/netharn/models/yolo2/light_region_loss.py
Function: _build_targets_tensor at line 382
Line # Hits Time Per Hit % Time Line Contents
==============================================================
382 @profiler.profile
383 def _build_targets_tensor(self, pred_boxes, pred_confs, ground_truth, nH, nW, seen=0):
384 """
385 Compare prediction boxes and ground truths, convert ground truths to network output tensors
386
387 Example:
388 >>> from netharn.models.yolo2.light_yolo import Yolo
389 >>> from netharn.models.yolo2.light_region_loss import RegionLoss
390 >>> torch.random.manual_seed(0)
391 >>> network = Yolo(num_classes=2, conf_thresh=4e-2)
392 >>> self = RegionLoss(num_classes=network.num_classes, anchors=network.anchors)
393 >>> Win, Hin = 96, 96
394 >>> nW, nH = 3, 3
395 >>> # true boxes for each item in the batch
396 >>> # each box encodes class, center, width, and height
397 >>> # coordinates are normalized in the range 0 to 1
398 >>> # items in each batch are padded with dummy boxes with class_id=-1
399 >>> ground_truth = torch.FloatTensor([
400 >>> # boxes for batch item 0 (it has no objects, note the pad!)
401 >>> [[-1, 0, 0, 0, 0],
402 >>> [-1, 0, 0, 0, 0],
403 >>> [-1, 0, 0, 0, 0]],
404 >>> # boxes for batch item 1
405 >>> [[0, 0.50, 0.50, 1.00, 1.00],
406 >>> [1, 0.34, 0.32, 0.12, 0.32],
407 >>> [1, 0.32, 0.42, 0.22, 0.12]],
408 >>> ])
409 >>> pred_boxes = torch.rand(90, 4)
410 >>> pred_confs = torch.rand(90)
411 >>> seen = 0
412
413 """
414 # Parameters
415 1 10.0 10.0 0.0 nB = ground_truth.size(0)
416 1 8.0 8.0 0.0 nT = ground_truth.size(1)
417 1 8.0 8.0 0.0 nA = self.num_anchors
418 1 8.0 8.0 0.0 nAnchors = nA * nH * nW
419 1 8.0 8.0 0.0 nPixels = nH * nW
420
421 1 8.0 8.0 0.0 seen = seen + nB
422
423 # Tensors
424 1 36.0 36.0 0.1 conf_mask = torch.ones(nB, nA, nPixels) * self.noobject_scale
425 1 19.0 19.0 0.1 coord_mask = torch.zeros(nB, nA, 1, nPixels)
426 1 64.0 64.0 0.2 cls_mask = torch.zeros(nB, nA, nPixels).byte()
427 1 24.0 24.0 0.1 tcoord = torch.zeros(nB, nA, 4, nPixels)
428 1 18.0 18.0 0.1 tconf = torch.zeros(nB, nA, nPixels)
429 1 17.0 17.0 0.1 tcls = torch.zeros(nB, nA, nPixels)
430
431 1 9.0 9.0 0.0 if seen < 12800:
432 1 16.0 16.0 0.1 coord_mask.fill_(1)
433 1 24.0 24.0 0.1 tcoord[:, :, 0].fill_(0.5)
434 1 19.0 19.0 0.1 tcoord[:, :, 1].fill_(0.5)
435
436 1 8.0 8.0 0.0 pred_cxywh = pred_boxes
437 1 243.0 243.0 0.9 pred_tlbr = util.Boxes(pred_cxywh.data.cpu().numpy(), 'cxywh').toformat('tlbr').data
438
439 1 23.0 23.0 0.1 gt_class = ground_truth[..., 0].data.cpu().numpy()
440 1 91.0 91.0 0.3 gt_cxywh = util.Boxes(ground_truth[..., 1:5].numpy().astype(np.float32), 'cxywh').scale([nW, nH])
441
442 1 72.0 72.0 0.3 gt_tlbr = gt_cxywh.to_tlbr().data
443
444 1 19.0 19.0 0.1 rel_gt_cxywh = gt_cxywh.copy()
445 1 16.0 16.0 0.1 rel_gt_cxywh.data.T[0:2] = 0
446
447 1 70.0 70.0 0.3 rel_gt_tlbr = rel_gt_cxywh.toformat('tlbr').data
448
449 1 19.0 19.0 0.1 gt_isvalid = (gt_class >= 0)
450
451 1 8.0 8.0 0.0 batch_assigns = []
452 9 70.0 7.8 0.3 for bx in range(nB):
453 # Get the actual groundtruth boxes for this batch item
454 8 64.0 8.0 0.2 flags = gt_isvalid[bx]
455 8 191.0 23.9 0.7 if not np.any(flags):
456 1 6.0 6.0 0.0 batch_assigns.append((None, None))
457 1 5.0 5.0 0.0 continue
458
459 # Create gt anchor assignments
460 7 135.0 19.3 0.5 batch_rel_gt_tlbr = rel_gt_tlbr[bx][flags]
461 7 56.0 8.0 0.2 anchor_ious = util.box_ious(self.rel_anchors_tlbr,
462 7 46.0 6.6 0.2 batch_rel_gt_tlbr, bias=0,
463 7 172.0 24.6 0.7 mode=self.iou_mode)
464 7 126.0 18.0 0.5 best_ns = np.argmax(anchor_ious, axis=0)
465 7 136.0 19.4 0.5 best_anchor_ious = anchor_ious.max(axis=0)
466 7 50.0 7.1 0.2 batch_assigns.append((best_ns, best_anchor_ious))
467
468 # Setting confidence mask
469 7 64.0 9.1 0.2 cur_pred_tlbr = pred_tlbr[bx * nAnchors:(bx + 1) * nAnchors]
470 7 108.0 15.4 0.4 cur_gt_tlbr = gt_tlbr[bx][flags]
471
472 7 50.0 7.1 0.2 ious = util.box_ious(cur_pred_tlbr, cur_gt_tlbr, bias=0,
473 7 533.0 76.1 2.0 mode=self.iou_mode)
474 7 421.0 60.1 1.6 cur_ious = torch.FloatTensor(ious.max(-1))
475 7 452.0 64.6 1.7 conf_mask[bx].view(-1)[cur_ious > self.thresh] = 0
476
477 # Loop over ground_truths and construct tensors
478 9 50.0 5.6 0.2 for bx in range(nB):
479 8 45.0 5.6 0.2 best_ns, best_anchor_ious = batch_assigns[bx]
480 8 53.0 6.6 0.2 flags = gt_isvalid[bx]
481 8 158.0 19.8 0.6 if not np.any(flags):
482 1 5.0 5.0 0.0 continue
483 75 441.0 5.9 1.7 for t in range(nT):
484 74 451.0 6.1 1.7 if not flags[t]:
485 6 30.0 5.0 0.1 break
486 68 672.0 9.9 2.6 gx, gy, gw, gh = gt_cxywh.data[bx][t]
487 68 516.0 7.6 2.0 gi = min(nW - 1, max(0, int(gx)))
488 68 438.0 6.4 1.7 gj = min(nH - 1, max(0, int(gy)))
489
490 68 395.0 5.8 1.5 best_n = best_ns[t]
491
492 68 432.0 6.4 1.6 gt_box_ = gt_tlbr[bx][t]
493 68 568.0 8.4 2.2 pred_box_ = pred_tlbr[bx * nAnchors + best_n * nPixels + gj * nW + gi]
494
495 68 463.0 6.8 1.8 iou = float(util.box_ious(gt_box_[None, :], pred_box_[None, :],
496 68 1390.0 20.4 5.3 bias=0, mode=self.iou_mode)[0, 0])
497
498 68 472.0 6.9 1.8 best_anchor = self.anchors[best_n]
499 68 541.0 8.0 2.1 best_aw, best_ah = best_anchor
500
501 68 1655.0 24.3 6.3 coord_mask[bx, best_n, 0, gj * nW + gi] = 1
502 68 1470.0 21.6 5.6 cls_mask[bx, best_n, gj * nW + gi] = 1
503 68 1344.0 19.8 5.1 conf_mask[bx, best_n, gj * nW + gi] = self.object_scale
504
505 68 2211.0 32.5 8.4 tcoord[bx, best_n, 0, gj * nW + gi] = gx - gi
506 68 2083.0 30.6 7.9 tcoord[bx, best_n, 1, gj * nW + gi] = gy - gj
507 68 2102.0 30.9 8.0 tcoord[bx, best_n, 2, gj * nW + gi] = math.log(gw / best_aw)
508 68 2061.0 30.3 7.9 tcoord[bx, best_n, 3, gj * nW + gi] = math.log(gh / best_ah)
509 68 1277.0 18.8 4.9 tconf[bx, best_n, gj * nW + gi] = iou
510 68 1376.0 20.2 5.2 tcls[bx, best_n, gj * nW + gi] = ground_truth[bx, t, 0]
511
512 1 5.0 5.0 0.0 return coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls
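
A note on the section above: the vectorized version assigns each ground truth to its best anchor by computing the full anchor-vs-truth IoU matrix in a single util.box_ious call and taking an argmax over the anchor axis, rather than looping over anchors in Python. A minimal standalone sketch of that assignment pattern in NumPy (the helper below is illustrative, not the actual util.box_ious API):

    import numpy as np

    def assign_anchors(anchor_whs, gt_whs):
        """Assign each truth box to its best anchor by width/height IoU.

        Both boxes are treated as centered at the origin, which is the
        usual YOLOv2-style anchor assignment.

        Args:
            anchor_whs (ndarray): [A, 2] anchor widths and heights
            gt_whs (ndarray): [T, 2] truth widths and heights (grid units)

        Returns:
            best_ns (ndarray): [T] best anchor index per truth
            best_ious (ndarray): [T] corresponding IoU values
        """
        # For origin-centered boxes the intersection is min(w) * min(h)
        inter = (np.minimum(anchor_whs[:, None, 0], gt_whs[None, :, 0]) *
                 np.minimum(anchor_whs[:, None, 1], gt_whs[None, :, 1]))
        area_a = (anchor_whs[:, 0] * anchor_whs[:, 1])[:, None]
        area_g = (gt_whs[:, 0] * gt_whs[:, 1])[None, :]
        ious = inter / (area_a + area_g - inter)  # [A, T] IoU matrix
        best_ns = np.argmax(ious, axis=0)         # one argmax, no anchor loop
        best_ious = ious.max(axis=0)
        return best_ns, best_ious
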
Pystone time: 0.062961 s
File: /home/joncrall/code/netharn/netharn/models/yolo2/light_region_loss.py
Function: forward at line 284
Line # Hits Time Per Hit % Time Line Contents
==============================================================
284 @profiler.profile
285 def forward(self, output, target, seen=0):
286 """ Compute Region loss.
287
288 Args:
289 output (torch.autograd.Variable): Output from the network
290 target (brambox.boxes.annotations.Annotation or torch.Tensor): Brambox annotations or tensor containing the annotation targets (see :class:`lightnet.data.BramboxToTensor`)
291 seen (int): if specified, overrides the `seen` attribute read from `self.net` (default None)
292
293 Note:
294 If target is a tensor, the shape should be [B, T, 5], where B is
295 the batch size, T is the maximum number of boxes in an item, and
296 the final dimension should correspond to [class_idx, center_x,
297 center_y, width, height]. Items with fewer than T boxes should be
298 padded with dummy boxes with class_idx=-1.
299 """
300 # Parameters
301 1 14.0 14.0 0.0 nB = output.data.size(0)
302 1 8.0 8.0 0.0 nA = self.num_anchors
303 1 6.0 6.0 0.0 nC = self.num_classes
304 1 9.0 9.0 0.0 nH = output.data.size(2)
305 1 8.0 8.0 0.0 nW = output.data.size(3)
306 1 10.0 10.0 0.0 cuda = output.is_cuda
307 1 9.0 9.0 0.0 if isinstance(target, Variable):
308 1 7.0 7.0 0.0 target = target.data
309
310 # Get x,y,w,h,conf,cls
311 1 39.0 39.0 0.1 output = output.view(nB, nA, -1, nH * nW)
312 1 116.0 116.0 0.2 coord = torch.zeros_like(output[:, :, :4])
313 1 343.0 343.0 0.5 coord[:, :, 0:2] = output[:, :, 0:2].sigmoid() # tx,ty
314 1 47.0 47.0 0.1 coord[:, :, 2:4] = output[:, :, 2:4] # tw,th
315 1 158.0 158.0 0.3 conf = output[:, :, 4].sigmoid()
316 1 7.0 7.0 0.0 if nC > 1:
317 1 90.0 90.0 0.1 cls = output[:, :, 5:].contiguous().view(nB * nA, nC, nH * nW).transpose(1, 2).contiguous().view(-1, nC)
318
319 # Create prediction boxes
320 1 157.0 157.0 0.2 pred_boxes, lin_x, lin_y = self._init_pred_boxes(cuda, nB, nA, nH, nW)
321
322 1 97.0 97.0 0.2 pred_boxes[:, 0] = (coord[:, :, 0].data + lin_x).view(-1)
323 1 58.0 58.0 0.1 pred_boxes[:, 1] = (coord[:, :, 1].data + lin_y).view(-1)
324 1 211.0 211.0 0.3 pred_boxes[:, 2] = (coord[:, :, 2].data.exp() * self.anchor_w).view(-1)
325 1 175.0 175.0 0.3 pred_boxes[:, 3] = (coord[:, :, 3].data.exp() * self.anchor_h).view(-1)
326 1 14.0 14.0 0.0 pred_boxes = pred_boxes.cpu()
327
328 # Create predicted confs
329 1 13.0 13.0 0.0 pred_confs = torch.FloatTensor(nB * nA * nH * nW)
330 1 17.0 17.0 0.0 pred_confs = conf.data.view(-1).cpu()
331
332 # Get target values
333 1 7.0 7.0 0.0 coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls = self.build_targets(
334 1 36113.0 36113.0 57.4 pred_boxes, pred_confs, target, nH, nW, seen=seen)
335 1 23.0 23.0 0.0 coord_mask = coord_mask.expand_as(tcoord)
336 1 4.0 4.0 0.0 if nC > 1:
337 1 75.0 75.0 0.1 tcls = tcls.view(-1)[cls_mask.view(-1)].long()
338 1 74.0 74.0 0.1 cls_mask = cls_mask.view(-1, 1).repeat(1, nC)
339
340 1 4.0 4.0 0.0 if cuda:
341 tcoord = tcoord.cuda()
342 tconf = tconf.cuda()
343 coord_mask = coord_mask.cuda()
344 conf_mask = conf_mask.cuda()
345 if nC > 1:
346 tcls = tcls.cuda()
347 cls_mask = cls_mask.cuda()
348
349 1 9.0 9.0 0.0 tcoord = Variable(tcoord, requires_grad=False)
350 1 6.0 6.0 0.0 tconf = Variable(tconf, requires_grad=False)
351 1 7.0 7.0 0.0 coord_mask = Variable(coord_mask, requires_grad=False)
352 1 23556.0 23556.0 37.4 conf_mask = Variable(conf_mask.sqrt(), requires_grad=False)
353 1 10.0 10.0 0.0 if nC > 1:
354 1 13.0 13.0 0.0 tcls = Variable(tcls, requires_grad=False)
355 1 12.0 12.0 0.0 cls_mask = Variable(cls_mask, requires_grad=False)
356 1 161.0 161.0 0.3 cls = cls[cls_mask].view(-1, nC)
357
358 # Compute losses
359 1 391.0 391.0 0.6 loss_coord = self.coord_scale * self.mse(coord * coord_mask, tcoord * coord_mask) / nB
360 1 134.0 134.0 0.2 loss_conf = self.mse(conf * conf_mask, tconf * conf_mask) / nB
361 1 10.0 10.0 0.0 if nC > 1:
362 1 529.0 529.0 0.8 loss_cls = self.class_scale * 2 * nn.CrossEntropyLoss(size_average=False)(cls, tcls) / nB
363 1 51.0 51.0 0.1 loss_tot = loss_coord + loss_conf + loss_cls
364 1 45.0 45.0 0.1 self.loss_cls = float(loss_cls.data.cpu().numpy())
365 else:
366 self.loss_cls = 0
367 loss_tot = loss_coord + loss_conf
368
369 1 36.0 36.0 0.1 self.loss_tot = float(loss_tot.data.cpu().numpy())
370 1 34.0 34.0 0.1 self.loss_coord = float(loss_coord.data.cpu().numpy())
371 1 34.0 34.0 0.1 self.loss_conf = float(loss_conf.data.cpu().numpy())
372
373 1 10.0 10.0 0.0 return loss_tot
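
The two hot spots in forward are the build_targets call (57.4%) and the conf_mask.sqrt() line (37.4%); everything else is noise. The [B, T, 5] target convention described in the docstring (pad each batch item to the longest item with dummy rows whose class_idx is -1) is what nh.data.collate.padded_collate produces in profile_loss_speed below. A minimal sketch of that padding, assuming plain [Ti, 5] FloatTensors as input:

    import torch

    def pad_truth_batch(target_list):
        """Stack variable-length [Ti, 5] truth tensors into a single
        [B, T, 5] tensor, padding short items with class_idx = -1 rows."""
        T = max(item.shape[0] for item in target_list)
        padded = []
        for item in target_list:
            pad = torch.zeros(T - item.shape[0], 5)
            pad[:, 0] = -1  # dummy boxes are flagged with class_idx = -1
            padded.append(torch.cat([item, pad], dim=0))
        return torch.stack(padded, dim=0)

    # One empty item plus one item with two boxes -> shape [2, 2, 5]
    empty = torch.zeros(0, 5)
    boxes = torch.FloatTensor([[0, 0.50, 0.50, 1.00, 1.00],
                               [1, 0.34, 0.32, 0.12, 0.32]])
    target = pad_truth_batch([empty, boxes])
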
Pystone time: 0.128014 s
File: /home/joncrall/code/lightnet/lightnet/network/loss.py
Function: __build_targets_tensor at line 141
Line # Hits Time Per Hit % Time Line Contents
==============================================================
141 @profiler.profile
142 def __build_targets_tensor(self, pred_boxes, pred_confs, ground_truth, nH, nW):
143 """ Compare prediction boxes and ground truths, convert ground truths to network output tensors """
144 # Parameters
145 1 3.0 3.0 0.0 nB = ground_truth.size(0)
146 1 3.0 3.0 0.0 nT = ground_truth.size(1)
147 1 2.0 2.0 0.0 nA = self.num_anchors
148 1 2.0 2.0 0.0 nAnchors = nA*nH*nW
149 1 2.0 2.0 0.0 nPixels = nH*nW
150 1 3.0 3.0 0.0 seen = self.net.seen + nB
151
152 # Tensors
153 1 25.0 25.0 0.0 conf_mask = torch.ones(nB, nA, nH*nW) * self.noobject_scale
154 1 12.0 12.0 0.0 coord_mask = torch.zeros(nB, nA, 1, nH*nW)
155 1 30.0 30.0 0.0 cls_mask = torch.zeros(nB, nA, nH*nW).byte()
156 1 16.0 16.0 0.0 tcoord = torch.zeros(nB, nA, 4, nH*nW)
157 1 11.0 11.0 0.0 tconf = torch.zeros(nB, nA, nH*nW)
158 1 10.0 10.0 0.0 tcls = torch.zeros(nB, nA, nH*nW)
159
160 1 2.0 2.0 0.0 if seen < 12800:
161 1 9.0 9.0 0.0 coord_mask.fill_(1)
162 1 3.0 3.0 0.0 if self.anchor_step == 4:
163 tcoord[:,:,0] = torch.Tensor(self.anchors[2::self.anchor_step]).view(1,nA,1,1).repeat(nB,1,1,nH*nW)
164 tcoord[:,:,1] = torch.Tensor(self.anchors[3::self.anchor_step]).view(1,nA,1,1).repeat(nB,1,1,nH*nW)
165 else:
166 1 16.0 16.0 0.0 tcoord[:,:,0].fill_(0.5)
167 1 13.0 13.0 0.0 tcoord[:,:,1].fill_(0.5)
168
169 # Setting confidence mask
170 9 17.0 1.9 0.0 for b in range(nB):
171 8 42.0 5.2 0.0 cur_pred_boxes = pred_boxes[b*nAnchors:(b+1)*nAnchors]
172 8 36.0 4.5 0.0 cur_pred_confs = pred_confs[b*nAnchors:(b+1)*nAnchors]
173 8 67.0 8.4 0.1 cur_ious = torch.zeros(nAnchors)
174 76 141.0 1.9 0.1 for t in range(nT):
175 75 1003.0 13.4 0.8 if ground_truth[b][t][0] < 0:
176 7 11.0 1.6 0.0 break
177 68 836.0 12.3 0.7 gx = ground_truth[b][t][1] * nW
178 68 766.0 11.3 0.6 gy = ground_truth[b][t][2] * nH
179 68 778.0 11.4 0.6 gw = ground_truth[b][t][3] * nW
180 68 738.0 10.9 0.6 gh = ground_truth[b][t][4] * nH
181 68 1632.0 24.0 1.3 cur_gt_boxes = torch.FloatTensor([gx,gy,gw,gh]).repeat(nAnchors,1)
182 68 22055.0 324.3 17.2 cur_ious = torch.max(cur_ious, bbox_multi_ious(cur_pred_boxes, cur_gt_boxes))
183 8 265.0 33.1 0.2 conf_mask[b].view(-1)[cur_ious > self.thresh] = 0
184
185 # Loop over ground_truths and construct tensors
186 9 11.0 1.2 0.0 for b in range(nB):
187 76 114.0 1.5 0.1 for t in range(nT):
188 75 785.0 10.5 0.6 if ground_truth[b][t][0] < 0:
189 7 10.0 1.4 0.0 break
190 68 135.0 2.0 0.1 best_iou = 0.0
191 68 80.0 1.2 0.1 best_n = -1
192 68 72.0 1.1 0.1 min_dist = 10000
193 68 748.0 11.0 0.6 gx = ground_truth[b][t][1] * nW
194 68 681.0 10.0 0.5 gy = ground_truth[b][t][2] * nH
195 68 687.0 10.1 0.5 gw = ground_truth[b][t][3] * nW
196 68 679.0 10.0 0.5 gh = ground_truth[b][t][4] * nH
197 68 304.0 4.5 0.2 gi = min(nW-1, max(0, int(gx)))
198 68 196.0 2.9 0.2 gj = min(nH-1, max(0, int(gy)))
199 68 210.0 3.1 0.2 gt_box = [0, 0, gw, gh]
200 408 533.0 1.3 0.4 for n in range(nA):
201 340 714.0 2.1 0.6 aw = self.anchors[self.anchor_step*n]
202 340 481.0 1.4 0.4 ah = self.anchors[self.anchor_step*n+1]
203 340 496.0 1.5 0.4 anchor_box = [0, 0, aw, ah]
204 340 68238.0 200.7 53.3 iou = bbox_iou(anchor_box, gt_box)
205 340 657.0 1.9 0.5 if self.anchor_step == 4:
206 ax = self.anchors[self.anchor_step*n+2]
207 ay = self.anchors[self.anchor_step*n+3]
208 dist = pow(((gi+ax) - gx), 2) + pow(((gj+ay) - gy), 2)
209 340 2078.0 6.1 1.6 if iou > best_iou:
210 124 188.0 1.5 0.1 best_iou = iou
211 124 157.0 1.3 0.1 best_n = n
212 216 326.0 1.5 0.3 elif self.anchor_step==4 and iou == best_iou and dist < min_dist:
213 best_iou = iou
214 best_n = n
215 min_dist = dist
216
217 68 94.0 1.4 0.1 gt_box = [gx, gy, gw, gh]
218 68 370.0 5.4 0.3 pred_box = pred_boxes[b*nAnchors+best_n*nPixels+gj*nW+gi]
219 68 266.0 3.9 0.2 pred_conf = pred_confs[b*nAnchors+best_n*nPixels+gj*nW+gi]
220 68 10066.0 148.0 7.9 iou = bbox_iou(gt_box, pred_box)
221
222 68 1254.0 18.4 1.0 coord_mask[b][best_n][0][gj*nW+gi] = 1
223 68 1051.0 15.5 0.8 cls_mask[b][best_n][gj*nW+gi] = 1
224 68 928.0 13.6 0.7 conf_mask[b][best_n][gj*nW+gi] = self.object_scale
225 68 1140.0 16.8 0.9 tcoord[b][best_n][0][gj*nW+gi] = gx - gi
226 68 1070.0 15.7 0.8 tcoord[b][best_n][1][gj*nW+gi] = gy - gj
227 68 1598.0 23.5 1.2 tcoord[b][best_n][2][gj*nW+gi] = math.log(gw/self.anchors[self.anchor_step*best_n])
228 68 1510.0 22.2 1.2 tcoord[b][best_n][3][gj*nW+gi] = math.log(gh/self.anchors[self.anchor_step*best_n+1])
229 68 655.0 9.6 0.5 tconf[b][best_n][gj*nW+gi] = iou
230 68 882.0 13.0 0.7 tcls[b][best_n][gj*nW+gi] = ground_truth[b][t][0]
231
232 1 1.0 1.0 0.0 return coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls
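
Over half of this baseline's time (53.3%) goes to line 204: 340 single-pair bbox_iou calls inside a Python anchor loop at roughly 200 us each, with another 17% in the per-truth bbox_multi_ious loop on line 182. Both are what the netharn version above replaces with one batched IoU matrix. A minimal sketch of the batched form for the confidence-mask step, assuming [N, 4] cxywh tensors (illustrative, not the actual bbox_multi_ious signature):

    import torch

    def all_pairs_iou_cxywh(boxes1, boxes2):
        """All-pairs IoU between [N, 4] and [M, 4] cxywh boxes,
        returned as an [N, M] matrix in one broadcasted pass."""
        tl1 = boxes1[:, :2] - boxes1[:, 2:] / 2  # top-left corners
        br1 = boxes1[:, :2] + boxes1[:, 2:] / 2  # bottom-right corners
        tl2 = boxes2[:, :2] - boxes2[:, 2:] / 2
        br2 = boxes2[:, :2] + boxes2[:, 2:] / 2
        inter_wh = (torch.min(br1[:, None], br2[None, :]) -
                    torch.max(tl1[:, None], tl2[None, :])).clamp(min=0)
        inter = inter_wh[..., 0] * inter_wh[..., 1]
        area1 = (boxes1[:, 2] * boxes1[:, 3])[:, None]
        area2 = (boxes2[:, 2] * boxes2[:, 3])[None, :]
        return inter / (area1 + area2 - inter)

    # The nT-iteration loop on lines 174-182 then collapses to:
    # cur_ious = all_pairs_iou_cxywh(cur_pred_boxes, cur_gt_boxes).max(dim=1)[0]
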
Pystone time: 0.138044 s
File: /home/joncrall/code/lightnet/lightnet/network/loss.py
Function: __call__ at line 39
Line # Hits Time Per Hit % Time Line Contents
==============================================================
39 @profiler.profile
40 def __call__(self, output, target):
41 """ Compute Region loss.
42
43 Args:
44 output (torch.autograd.Variable): Output from the network
45 target (brambox.boxes.annotations.Annotation or torch.Tensor): Brambox annotations or tensor containing the annotation targets (see :class:`lightnet.data.BramboxToTensor`)
46
47 Note:
48 If target is a tensor, the shape should be [B, T, 5], where B is
49 the batch size, T is the maximum number of boxes in an item, and
50 the final dimension should correspond to [class_idx, center_x,
51 center_y, width, height]. Items with fewer than T boxes should be
52 padded with dummy boxes with class_idx=-1.
53 """
54 # Parameters
55 1 8.0 8.0 0.0 nB = output.data.size(0)
56 1 3.0 3.0 0.0 nA = self.num_anchors
57 1 3.0 3.0 0.0 nC = self.num_classes
58 1 4.0 4.0 0.0 nH = output.data.size(2)
59 1 4.0 4.0 0.0 nW = output.data.size(3)
60 1 4.0 4.0 0.0 cuda = output.is_cuda
61 1 4.0 4.0 0.0 if isinstance(target, Variable):
62 1 3.0 3.0 0.0 target = target.data
63
64 # Get x,y,w,h,conf,cls
65 1 20.0 20.0 0.0 output = output.view(nB, nA, -1, nH*nW)
66 1 45.0 45.0 0.0 coord = torch.zeros_like(output[:,:,:4])
67 1 468.0 468.0 0.3 coord[:,:,:2] = output[:,:,:2].sigmoid() # tx,ty
68 1 66.0 66.0 0.0 coord[:,:,2:4] = output[:,:,2:4] # tw,th
69 1 220.0 220.0 0.2 conf = output[:,:,4].sigmoid()
70 1 3.0 3.0 0.0 if nC > 1:
71 1 99.0 99.0 0.1 cls = output[:,:,5:].contiguous().view(nB*nA, nC, nH*nW).transpose(1,2).contiguous().view(-1, nC)
72
73 # Create prediction boxes
74 1 13.0 13.0 0.0 pred_boxes = torch.FloatTensor(nB*nA*nH*nW, 4)
75 1 44.0 44.0 0.0 lin_x = torch.linspace(0, nW-1, nW).repeat(nH,1).view(nH*nW)
76 1 52.0 52.0 0.0 lin_y = torch.linspace(0, nH-1, nH).repeat(nW,1).t().contiguous().view(nH*nW)
77 1 41.0 41.0 0.0 anchor_w = torch.Tensor(self.anchors[::self.anchor_step]).view(nA, 1)
78 1 28.0 28.0 0.0 anchor_h = torch.Tensor(self.anchors[1::self.anchor_step]).view(nA, 1)
79 1 3.0 3.0 0.0 if cuda:
80 pred_boxes = pred_boxes.cuda()
81 lin_x = lin_x.cuda()
82 lin_y = lin_y.cuda()
83 anchor_w = anchor_w.cuda()
84 anchor_h = anchor_h.cuda()
85
86 1 85.0 85.0 0.1 pred_boxes[:,0] = (coord[:,:,0].data + lin_x).view(-1)
87 1 92.0 92.0 0.1 pred_boxes[:,1] = (coord[:,:,1].data + lin_y).view(-1)
88 1 208.0 208.0 0.2 pred_boxes[:,2] = (coord[:,:,2].data.exp() * anchor_w).view(-1)
89 1 188.0 188.0 0.1 pred_boxes[:,3] = (coord[:,:,3].data.exp() * anchor_h).view(-1)
90 1 5.0 5.0 0.0 pred_boxes = pred_boxes.cpu()
91
92 # Create predicted confs
93 1 9.0 9.0 0.0 pred_confs = torch.FloatTensor(nB*nA*nH*nW)
94 1 12.0 12.0 0.0 pred_confs = conf.data.view(-1).cpu()
95
96 # Get target values
97 1 135520.0 135520.0 98.2 coord_mask,conf_mask,cls_mask,tcoord,tconf,tcls = self.build_targets(pred_boxes,pred_confs,target,nH,nW)
98 1 9.0 9.0 0.0 coord_mask = coord_mask.expand_as(tcoord)
99 1 2.0 2.0 0.0 if nC > 1:
100 1 53.0 53.0 0.0 tcls = tcls.view(-1)[cls_mask.view(-1)].long()
101 1 67.0 67.0 0.0 cls_mask = cls_mask.view(-1, 1).repeat(1, nC)
102
103 1 1.0 1.0 0.0 if cuda:
104 tcoord = tcoord.cuda()
105 tconf = tconf.cuda()
106 coord_mask = coord_mask.cuda()
107 conf_mask = conf_mask.cuda()
108 if nC > 1:
109 tcls = tcls.cuda()
110 cls_mask = cls_mask.cuda()
111
112 1 5.0 5.0 0.0 tcoord = Variable(tcoord, requires_grad=False)
113 1 3.0 3.0 0.0 tconf = Variable(tconf, requires_grad=False)
114 1 3.0 3.0 0.0 coord_mask = Variable(coord_mask, requires_grad=False)
115 1 15.0 15.0 0.0 conf_mask = Variable(conf_mask.sqrt(), requires_grad=False)
116 1 2.0 2.0 0.0 if nC > 1:
117 1 3.0 3.0 0.0 tcls = Variable(tcls, requires_grad=False)
118 1 2.0 2.0 0.0 cls_mask = Variable(cls_mask, requires_grad=False)
119 1 63.0 63.0 0.0 cls = cls[cls_mask].view(-1, nC)
120
121 # Compute losses
122 1 71.0 71.0 0.1 mse = nn.MSELoss(size_average=False)
123 1 192.0 192.0 0.1 self.loss_coord = self.coord_scale * mse(coord*coord_mask, tcoord*coord_mask) / nB
124 1 67.0 67.0 0.0 self.loss_conf = mse(conf*conf_mask, tconf*conf_mask) / nB
125 1 2.0 2.0 0.0 if nC > 1:
126 1 209.0 209.0 0.2 self.loss_cls = self.class_scale * 2 * nn.CrossEntropyLoss(size_average=False)(cls, tcls) / nB
127 1 19.0 19.0 0.0 self.loss_tot = self.loss_coord + self.loss_conf + self.loss_cls
128 else:
129 self.loss_cls = None
130 self.loss_tot = self.loss_coord + self.loss_conf
131
132 1 2.0 2.0 0.0 return self.loss_tot
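
98.2% of __call__ lands in build_targets, so the decode step above (lines 86-89) is already cheap: sigmoided centers get their grid-cell offsets added via lin_x/lin_y, and exponentiated sizes are scaled by the anchors. A standalone sketch of that YOLOv2 decode, with the assumed shapes spelled out:

    import torch

    def decode_yolo_boxes(coord, anchors, nH, nW):
        """Decode raw YOLOv2 coordinates into grid-space cxywh boxes.

        Args:
            coord (Tensor): [B, A, 4, nH*nW] holding
                (sigmoid(tx), sigmoid(ty), tw, th)
            anchors (Tensor): [A, 2] anchor (w, h) in grid units

        Returns:
            Tensor: [B*A*nH*nW, 4] boxes as (cx, cy, w, h) in grid units
        """
        B, A = coord.shape[0], coord.shape[1]
        # Per-cell column / row offsets, flattened to length nH*nW
        lin_x = torch.linspace(0, nW - 1, nW).repeat(nH, 1).view(nH * nW)
        lin_y = torch.linspace(0, nH - 1, nH).repeat(nW, 1).t().contiguous().view(nH * nW)
        boxes = torch.zeros(B * A * nH * nW, 4)
        boxes[:, 0] = (coord[:, :, 0] + lin_x).view(-1)  # cx = sig(tx) + cell_x
        boxes[:, 1] = (coord[:, :, 1] + lin_y).view(-1)  # cy = sig(ty) + cell_y
        boxes[:, 2] = (coord[:, :, 2].exp() * anchors[:, 0:1]).view(-1)  # w = aw * exp(tw)
        boxes[:, 3] = (coord[:, :, 3].exp() * anchors[:, 1:2]).view(-1)  # h = ah * exp(th)
        return boxes
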
Pystone time: 16.0487 s
File: /home/joncrall/code/netharn/netharn/models/yolo2/light_region_loss.py
Function: profile_loss_speed at line 61
Line # Hits Time Per Hit % Time Line Contents
==============================================================
61 @profiler.profile
62 def profile_loss_speed():
63 """
64 python ~/code/netharn/netharn/models/yolo2/light_region_loss.py profile_loss_speed --profile
65
66 Example:
67 >>> profile_loss_speed()
68 """
69 1 660.0 660.0 0.0 from netharn.models.yolo2.light_yolo import Yolo
70 1 573.0 573.0 0.0 import netharn.models.yolo2.light_region_loss
71 1 9.0 9.0 0.0 import lightnet.network
72 1 8.0 8.0 0.0 import netharn as nh
73
74 1 23.0 23.0 0.0 rng = util.ensure_rng(0)
75 1 942.0 942.0 0.0 torch.random.manual_seed(0)
76 1 438761.0 438761.0 2.7 network = Yolo(num_classes=2, conf_thresh=4e-2)
77
78 1 10.0 10.0 0.0 self1 = netharn.models.yolo2.light_region_loss.RegionLoss(
79 1 561.0 561.0 0.0 num_classes=network.num_classes, anchors=network.anchors)
80 1 21.0 21.0 0.0 self2 = lightnet.network.RegionLoss(network=network)
81
82 1 8.0 8.0 0.0 bsize = 8
83 # Make a random semi-realistic set of groundtruth items
84 1 51.0 51.0 0.0 n_targets = [rng.randint(0, 20) for _ in range(bsize)]
85 1 9.0 9.0 0.0 target_list = [torch.FloatTensor(
86 np.hstack([rng.randint(0, network.num_classes, nT)[:, None],
87 util.Boxes.random(nT, scale=1.0, rng=rng).data]))
88 1 502.0 502.0 0.0 for nT in n_targets]
89 1 318.0 318.0 0.0 target = nh.data.collate.padded_collate(target_list)
90
91 1 8.0 8.0 0.0 Win, Hin = 416, 416
92 1 335648.0 335648.0 2.1 im_data = torch.randn(len(target), 3, Hin, Win)
93 1 15068571.0 15068571.0 93.9 output = network.forward(im_data)
94
95 1 63498.0 63498.0 0.4 loss1 = float(self1(output, target))
96 1 138240.0 138240.0 0.9 loss2 = float(self2(output, target))
97 1 154.0 154.0 0.0 print('loss1 = {!r}'.format(loss1))
98 1 102.0 102.0 0.0 print('loss2 = {!r}'.format(loss2))
0.03 seconds - RegionLoss._build_targets_tensor:/home/joncrall/code/netharn/netharn/models/yolo2/light_region_loss.py:382
0.06 seconds - RegionLoss.forward :/home/joncrall/code/netharn/netharn/models/yolo2/light_region_loss.py:284
0.13 seconds - __build_targets_tensor :/home/joncrall/code/lightnet/lightnet/network/loss.py:141
0.14 seconds - __call__ :/home/joncrall/code/lightnet/lightnet/network/loss.py:39
16.05 seconds - profile_loss_speed :/home/joncrall/code/netharn/netharn/models/yolo2/light_region_loss.py:61
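
Takeaway from the summary: by the Pystone times, the netharn target builder is roughly 4.9x faster than the lightnet baseline and the end-to-end loss call roughly 2.2x faster, while the 16 s total for profile_loss_speed is almost entirely the single network forward pass (93.9%), not either loss. The ratios, for reference:

    # Speedups computed from the Pystone times reported above
    build_speedup = 0.128014 / 0.026254  # ~4.9x faster target construction
    loss_speedup = 0.138044 / 0.062961   # ~2.2x faster end-to-end loss call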