Created
March 1, 2017 03:55
-
-
Save salilkapur/796e73c1173e5dd84731276f50009359 to your computer and use it in GitHub Desktop.
Diff for resnet model
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
diff --git a/slim/nets/resnet_v1.py b/slim/nets/resnet_v1.py
index 03d49ed..edc2af9 100644
--- a/slim/nets/resnet_v1.py
+++ b/slim/nets/resnet_v1.py
@@ -226,7 +226,7 @@ def resnet_v1_50(inputs,
   return resnet_v1(inputs, blocks, num_classes, is_training,
                    global_pool=global_pool, output_stride=output_stride,
                    include_root_block=True, reuse=reuse, scope=scope)
-
+resnet_v1_50.default_image_size = resnet_v1.default_image_size

 def resnet_v1_101(inputs,
                   num_classes=None,
@@ -249,6 +249,7 @@ def resnet_v1_101(inputs,
   return resnet_v1(inputs, blocks, num_classes, is_training,
                    global_pool=global_pool, output_stride=output_stride,
                    include_root_block=True, reuse=reuse, scope=scope)
+resnet_v1_101.default_image_size = resnet_v1.default_image_size

 def resnet_v1_152(inputs,
diff --git a/slim/preprocessing/vgg_preprocessing.py b/slim/preprocessing/vgg_preprocessing.py
index 672c740..e226f89 100644
--- a/slim/preprocessing/vgg_preprocessing.py
+++ b/slim/preprocessing/vgg_preprocessing.py
@@ -73,7 +73,7 @@ def _crop(image, offset_height, offset_width, crop_height, crop_width):
       ['Rank of image must be equal to 3.'])
   cropped_shape = control_flow_ops.with_dependencies(
       [rank_assertion],
-      tf.pack([crop_height, crop_width, original_shape[2]]))
+      tf.stack([crop_height, crop_width, original_shape[2]]))

   size_assertion = tf.Assert(
       tf.logical_and(
@@ -81,7 +81,7 @@ def _crop(image, offset_height, offset_width, crop_height, crop_width):
           tf.greater_equal(original_shape[1], crop_width)),
       ['Crop size greater than the image size.'])

-  offsets = tf.to_int32(tf.pack([offset_height, offset_width, 0]))
+  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

   # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
   # define the crop size.
@@ -227,10 +227,10 @@ def _mean_image_subtraction(image, means):
   if len(means) != num_channels:
     raise ValueError('len(means) must match the number of channels')

-  channels = tf.split(2, num_channels, image)
+  channels = tf.split(image, num_channels, 2)
   for i in range(num_channels):
     channels[i] -= means[i]
-  return tf.concat(2, channels)
+  return tf.concat(channels, 2)

 def _smallest_size_at_least(height, width, smallest_side):
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment