diff --git a/keras_cv/src/datasets/pascal_voc/segmentation_test.py b/keras_cv/src/datasets/pascal_voc/segmentation_test.py
index 3695a31c04..0cf20a7b8f 100644
--- a/keras_cv/src/datasets/pascal_voc/segmentation_test.py
+++ b/keras_cv/src/datasets/pascal_voc/segmentation_test.py
@@ -105,13 +105,13 @@ def test_get_image_ids(self):
         train_ids = ["2007_000032", "2007_000039", "2007_000063"]
         eval_ids = ["2007_000033"]
         train_eval_ids = train_ids + eval_ids
-        self.assertEquals(
+        self.assertEqual(
             segmentation._get_image_ids(data_dir, "train"), train_ids
         )
-        self.assertEquals(
+        self.assertEqual(
             segmentation._get_image_ids(data_dir, "eval"), eval_ids
         )
-        self.assertEquals(
+        self.assertEqual(
             segmentation._get_image_ids(data_dir, "trainval"), train_eval_ids
         )
@@ -161,7 +161,7 @@ def test_parse_annotation_file(self):
                 },
             ],
         }
-        self.assertEquals(metadata, expected_result)
+        self.assertEqual(metadata, expected_result)
 
     def test_decode_png_mask(self):
         local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
@@ -177,19 +177,19 @@ def test_decode_png_mask(self):
         segmentation._maybe_populate_voc_color_mapping()
         mask = segmentation._decode_png_mask(mask)
-        self.assertEquals(mask.shape, (281, 500, 1))
-        self.assertEquals(
+        self.assertEqual(mask.shape, (281, 500, 1))
+        self.assertEqual(
             tf.reduce_max(mask), 255
         )  # The 255 value is for the boundary
-        self.assertEquals(
+        self.assertEqual(
             tf.reduce_min(mask), 0
         )  # The 0 value is for the background
         # The mask contains two classes, 1 and 15, see the label section in the
         # previous test case.
-        self.assertEquals(
+        self.assertEqual(
             tf.reduce_sum(tf.cast(tf.equal(mask, 1), tf.int32)), 4734
         )
-        self.assertEquals(
+        self.assertEqual(
             tf.reduce_sum(tf.cast(tf.equal(mask, 15), tf.int32)), 866
         )
@@ -245,7 +245,7 @@ def test_parse_single_image(self):
                 data_dir, "SegmentationObject", "2007_000032.png"
             ),
         }
-        self.assertEquals(result_dict, expected_result)
+        self.assertEqual(result_dict, expected_result)
 
     def test_build_metadata(self):
         local_data_dir = os.path.join(self.tempdir, "pascal_voc_2012/")
@@ -257,7 +257,7 @@ def test_build_metadata(self):
         image_ids = segmentation._get_image_ids(data_dir, "trainval")
         metadata = segmentation._build_metadata(data_dir, image_ids)
-        self.assertEquals(
+        self.assertEqual(
             metadata["image/filename"],
             [
                 "2007_000032.jpg",
@@ -296,7 +296,7 @@ def test_build_dataset(self):
         dataset = segmentation._build_dataset_from_metadata(metadata)
         entry = next(dataset.take(1).as_numpy_iterator())
-        self.assertEquals(entry["image/filename"], b"2007_000032.jpg")
+        self.assertEqual(entry["image/filename"], b"2007_000032.jpg")
         expected_keys = [
             "image",
             "image/filename",
@@ -316,18 +316,18 @@ def test_build_dataset(self):
         # Check the mask png content
         png = entry["class_segmentation"]
-        self.assertEquals(png.shape, (281, 500, 1))
-        self.assertEquals(
+        self.assertEqual(png.shape, (281, 500, 1))
+        self.assertEqual(
             tf.reduce_max(png), 255
         )  # The 255 value is for the boundary
-        self.assertEquals(
+        self.assertEqual(
             tf.reduce_min(png), 0
         )  # The 0 value is for the background
         # The mask contains two classes, 1 and 15, see the label section in the
         # previous test case.
-        self.assertEquals(
+        self.assertEqual(
             tf.reduce_sum(tf.cast(tf.equal(png, 1), tf.int32)), 4734
         )
-        self.assertEquals(
+        self.assertEqual(
             tf.reduce_sum(tf.cast(tf.equal(png, 15), tf.int32)), 866
         )
diff --git a/keras_cv/src/datasets/waymo/load_test.py b/keras_cv/src/datasets/waymo/load_test.py
index 761791666d..f358ef7ac8 100644
--- a/keras_cv/src/datasets/waymo/load_test.py
+++ b/keras_cv/src/datasets/waymo/load_test.py
@@ -44,7 +44,7 @@ def test_load_from_directory(self):
         # Extract records into a list
         dataset = [record for record in dataset]
-        self.assertEquals(len(dataset), 1)
+        self.assertEqual(len(dataset), 1)
         self.assertNotEqual(dataset[0]["timestamp_micros"], 0)
 
     @pytest.mark.skipif(
@@ -58,5 +58,5 @@ def test_load_from_files(self):
         # Extract records into a list
         dataset = [record for record in dataset]
-        self.assertEquals(len(dataset), 1)
+        self.assertEqual(len(dataset), 1)
         self.assertNotEqual(dataset[0]["timestamp_micros"], 0)
diff --git a/keras_cv/src/layers/augmenter_test.py b/keras_cv/src/layers/augmenter_test.py
index f05e356cf4..b93e7ca000 100644
--- a/keras_cv/src/layers/augmenter_test.py
+++ b/keras_cv/src/layers/augmenter_test.py
@@ -27,7 +27,7 @@ def test_call(self):
             ]
         )
         output = augmenter(images)
-        self.assertEquals(output.shape, images.shape)
+        self.assertEqual(output.shape, images.shape)
 
     def test_call_with_labels(self):
         images = {
@@ -43,4 +43,4 @@ def test_call_with_labels(self):
             ]
         )
         output = augmenter(images)
-        self.assertEquals(output["images"].shape, images["images"].shape)
+        self.assertEqual(output["images"].shape, images["images"].shape)
diff --git a/keras_cv/src/layers/feature_pyramid_test.py b/keras_cv/src/layers/feature_pyramid_test.py
index 5fc21217e4..e86b6814d7 100644
--- a/keras_cv/src/layers/feature_pyramid_test.py
+++ b/keras_cv/src/layers/feature_pyramid_test.py
@@ -30,7 +30,7 @@ def test_return_type_dict(self):
         inputs = {2: c2, 3: c3, 4: c4, 5: c5}
         output = layer(inputs)
         self.assertTrue(isinstance(output, dict))
-        self.assertEquals(sorted(output.keys()), [2, 3, 4, 5])
+        self.assertEqual(sorted(output.keys()), [2, 3, 4, 5])
 
     def test_result_shapes(self):
         layer = FeaturePyramid(min_level=2, max_level=5)
@@ -42,9 +42,9 @@ def test_result_shapes(self):
         inputs = {2: c2, 3: c3, 4: c4, 5: c5}
         output = layer(inputs)
         for level in inputs.keys():
-            self.assertEquals(output[level].shape[1], inputs[level].shape[1])
-            self.assertEquals(output[level].shape[2], inputs[level].shape[2])
-            self.assertEquals(output[level].shape[3], layer.num_channels)
+            self.assertEqual(output[level].shape[1], inputs[level].shape[1])
+            self.assertEqual(output[level].shape[2], inputs[level].shape[2])
+            self.assertEqual(output[level].shape[3], layer.num_channels)
 
         # Test with different resolution and channel size
         c2 = np.ones([2, 64, 128, 4])
@@ -56,9 +56,9 @@ def test_result_shapes(self):
         layer = FeaturePyramid(min_level=2, max_level=5)
         output = layer(inputs)
         for level in inputs.keys():
-            self.assertEquals(output[level].shape[1], inputs[level].shape[1])
-            self.assertEquals(output[level].shape[2], inputs[level].shape[2])
-            self.assertEquals(output[level].shape[3], layer.num_channels)
+            self.assertEqual(output[level].shape[1], inputs[level].shape[1])
+            self.assertEqual(output[level].shape[2], inputs[level].shape[2])
+            self.assertEqual(output[level].shape[3], layer.num_channels)
 
     def test_with_keras_input_tensor(self):
         # This mimic the model building with Backbone network
@@ -71,13 +71,13 @@ def test_with_keras_input_tensor(self):
         inputs = {2: c2, 3: c3, 4: c4, 5: c5}
         output = layer(inputs)
         for level in inputs.keys():
-            self.assertEquals(output[level].shape[1], inputs[level].shape[1])
-            self.assertEquals(output[level].shape[2], inputs[level].shape[2])
-            self.assertEquals(output[level].shape[3], layer.num_channels)
+            self.assertEqual(output[level].shape[1], inputs[level].shape[1])
+            self.assertEqual(output[level].shape[2], inputs[level].shape[2])
+            self.assertEqual(output[level].shape[3], layer.num_channels)
 
     def test_invalid_lateral_layers(self):
         lateral_layers = [keras.layers.Conv2D(256, 1)] * 3
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "Expect lateral_layers to be a dict"
         ):
             _ = FeaturePyramid(
@@ -88,7 +88,7 @@ def test_invalid_lateral_layers(self):
             3: keras.layers.Conv2D(256, 1),
             4: keras.layers.Conv2D(256, 1),
         }
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "with keys as .* [2, 3, 4, 5]"
         ):
             _ = FeaturePyramid(
@@ -97,7 +97,7 @@ def test_invalid_output_layers(self):
         output_layers = [keras.layers.Conv2D(256, 3)] * 3
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "Expect output_layers to be a dict"
         ):
             _ = FeaturePyramid(
@@ -108,7 +108,7 @@
             3: keras.layers.Conv2D(256, 3),
             4: keras.layers.Conv2D(256, 3),
         }
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "with keys as .* [2, 3, 4, 5]"
         ):
             _ = FeaturePyramid(
@@ -126,13 +126,13 @@ def test_invalid_input_features(self):
         # Build required for Keas 3
         _ = layer(inputs)
         list_input = [c2, c3, c4, c5]
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "expects input features to be a dict"
         ):
             layer(list_input)
 
         dict_input_with_missing_feature = {2: c2, 3: c3, 4: c4}
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "Expect feature keys.*[2, 3, 4, 5]"
         ):
             layer(dict_input_with_missing_feature)
diff --git a/keras_cv/src/layers/fusedmbconv_test.py b/keras_cv/src/layers/fusedmbconv_test.py
index 1e33a821d5..36e5598702 100644
--- a/keras_cv/src/layers/fusedmbconv_test.py
+++ b/keras_cv/src/layers/fusedmbconv_test.py
@@ -36,7 +36,7 @@ def test_same_input_output_shapes(self):
         layer = FusedMBConvBlock(input_filters=32, output_filters=32)
         output = layer(inputs)
-        self.assertEquals(output.shape, [1, 64, 64, 32])
+        self.assertEqual(output.shape, [1, 64, 64, 32])
         self.assertLen(output, 1)
         self.assertTrue(isinstance(output, tf.Tensor))
@@ -45,7 +45,7 @@ def test_different_input_output_shapes(self):
         layer = FusedMBConvBlock(input_filters=32, output_filters=48)
         output = layer(inputs)
-        self.assertEquals(output.shape, [1, 64, 64, 48])
+        self.assertEqual(output.shape, [1, 64, 64, 48])
         self.assertLen(output, 1)
         self.assertTrue(isinstance(output, tf.Tensor))
@@ -56,6 +56,6 @@ def test_squeeze_excitation_ratio(self):
         )
         output = layer(inputs)
-        self.assertEquals(output.shape, [1, 64, 64, 48])
+        self.assertEqual(output.shape, [1, 64, 64, 48])
         self.assertLen(output, 1)
         self.assertTrue(isinstance(output, tf.Tensor))
diff --git a/keras_cv/src/layers/mbconv_test.py b/keras_cv/src/layers/mbconv_test.py
index 8ac36764f6..78cb10eef7 100644
--- a/keras_cv/src/layers/mbconv_test.py
+++ b/keras_cv/src/layers/mbconv_test.py
@@ -36,7 +36,7 @@ def test_same_input_output_shapes(self):
         layer = MBConvBlock(input_filters=32, output_filters=32)
         output = layer(inputs)
-        self.assertEquals(output.shape, [1, 64, 64, 32])
+        self.assertEqual(output.shape, [1, 64, 64, 32])
         self.assertLen(output, 1)
         self.assertTrue(isinstance(output, tf.Tensor))
@@ -45,7 +45,7 @@ def test_different_input_output_shapes(self):
         layer = MBConvBlock(input_filters=32, output_filters=48)
         output = layer(inputs)
-        self.assertEquals(output.shape, [1, 64, 64, 48])
+        self.assertEqual(output.shape, [1, 64, 64, 48])
         self.assertLen(output, 1)
         self.assertTrue(isinstance(output, tf.Tensor))
@@ -54,6 +54,6 @@ def test_squeeze_excitation_ratio(self):
         layer = MBConvBlock(input_filters=32, output_filters=48, se_ratio=0.25)
         output = layer(inputs)
-        self.assertEquals(output.shape, [1, 64, 64, 48])
+        self.assertEqual(output.shape, [1, 64, 64, 48])
         self.assertLen(output, 1)
         self.assertTrue(isinstance(output, tf.Tensor))
diff --git a/keras_cv/src/layers/preprocessing/cut_mix_test.py b/keras_cv/src/layers/preprocessing/cut_mix_test.py
index 0127b0bc55..dfad405fdc 100644
--- a/keras_cv/src/layers/preprocessing/cut_mix_test.py
+++ b/keras_cv/src/layers/preprocessing/cut_mix_test.py
@@ -236,7 +236,7 @@ def test_single_image_input(self):
         ys = tf.one_hot(tf.constant([1]), 2)
         inputs = {"images": xs, "labels": ys}
         layer = CutMix()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "CutMix received a single image to `call`"
         ):
             _ = layer(inputs)
@@ -246,7 +246,7 @@ def test_int_labels(self):
         ys = tf.one_hot(tf.constant([1, 0]), 2, dtype=tf.int32)
         inputs = {"images": xs, "labels": ys}
         layer = CutMix()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "CutMix received labels with type"
        ):
             _ = layer(inputs)
@@ -254,7 +254,7 @@ def test_int_labels(self):
     def test_image_input(self):
         xs = tf.ones((2, 512, 512, 3))
         layer = CutMix()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "CutMix expects inputs in a dictionary with format"
         ):
             _ = layer(xs)
diff --git a/keras_cv/src/layers/preprocessing/fourier_mix_test.py b/keras_cv/src/layers/preprocessing/fourier_mix_test.py
index bde18d8504..0b4feb007d 100644
--- a/keras_cv/src/layers/preprocessing/fourier_mix_test.py
+++ b/keras_cv/src/layers/preprocessing/fourier_mix_test.py
@@ -147,7 +147,7 @@ def test_image_input_only(self):
             tf.float32,
         )
         layer = FourierMix()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "expects inputs in a dictionary"
         ):
             _ = layer(xs)
@@ -157,7 +157,7 @@ def test_single_image_input(self):
         ys = tf.one_hot(tf.constant([1]), 2)
         inputs = {"images": xs, "labels": ys}
         layer = FourierMix()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "FourierMix received a single image to `call`"
         ):
             _ = layer(inputs)
diff --git a/keras_cv/src/layers/preprocessing/mix_up_test.py b/keras_cv/src/layers/preprocessing/mix_up_test.py
index b8049608a8..3d18f9736b 100644
--- a/keras_cv/src/layers/preprocessing/mix_up_test.py
+++ b/keras_cv/src/layers/preprocessing/mix_up_test.py
@@ -159,7 +159,7 @@ def test_image_input_only(self):
             tf.float32,
         )
         layer = MixUp()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "expects inputs in a dictionary"
         ):
             _ = layer(xs)
@@ -169,7 +169,7 @@ def test_single_image_input(self):
         ys = tf.one_hot(tf.constant([1]), 2)
         inputs = {"images": xs, "labels": ys}
         layer = MixUp()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "MixUp received a single image to `call`"
         ):
             _ = layer(inputs)
@@ -179,7 +179,7 @@ def test_int_labels(self):
         ys = tf.one_hot(tf.constant([1, 0]), 2, dtype=tf.int32)
         inputs = {"images": xs, "labels": ys}
         layer = MixUp()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "MixUp received labels with type"
         ):
             _ = layer(inputs)
@@ -187,7 +187,7 @@ def test_int_labels(self):
     def test_image_input(self):
         xs = tf.ones((2, 512, 512, 3))
         layer = MixUp()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "MixUp expects inputs in a dictionary with format"
         ):
             _ = layer(xs)
diff --git a/keras_cv/src/layers/preprocessing/mosaic_test.py b/keras_cv/src/layers/preprocessing/mosaic_test.py
index ec7574b199..ef4aa5ce6d 100644
--- a/keras_cv/src/layers/preprocessing/mosaic_test.py
+++ b/keras_cv/src/layers/preprocessing/mosaic_test.py
@@ -91,7 +91,7 @@ def test_image_input_only(self):
             tf.float32,
         )
         layer = Mosaic()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "expects inputs in a dictionary"
         ):
             _ = layer(xs)
@@ -101,7 +101,7 @@ def test_single_image_input(self):
         ys = tf.one_hot(tf.constant([1]), 2)
         inputs = {"images": xs, "labels": ys}
         layer = Mosaic()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "Mosaic received a single image to `call`"
         ):
             _ = layer(inputs)
@@ -109,7 +109,7 @@ def test_single_image_input(self):
     def test_image_input(self):
         xs = tf.ones((2, 512, 512, 3))
         layer = Mosaic()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "Mosaic expects inputs in a dictionary with format"
         ):
             _ = layer(xs)
diff --git a/keras_cv/src/layers/preprocessing_3d/waymo/global_random_flip_test.py b/keras_cv/src/layers/preprocessing_3d/waymo/global_random_flip_test.py
index 7dae0c41f8..e02141e504 100644
--- a/keras_cv/src/layers/preprocessing_3d/waymo/global_random_flip_test.py
+++ b/keras_cv/src/layers/preprocessing_3d/waymo/global_random_flip_test.py
@@ -52,18 +52,18 @@ def test_augment_batch_point_clouds_and_bounding_boxes(self):
         self.assertNotAllClose(inputs, outputs)
 
     def test_noop_raises_error(self):
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "must flip over at least 1 axis"
         ):
             _ = GlobalRandomFlip(flip_x=False, flip_y=False, flip_z=False)
 
     def test_flip_x_or_z_raises_error(self):
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "only supports flipping over the Y"
         ):
             _ = GlobalRandomFlip(flip_x=True, flip_y=False, flip_z=False)
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "only supports flipping over the Y"
         ):
             _ = GlobalRandomFlip(flip_x=False, flip_y=False, flip_z=True)
diff --git a/keras_cv/src/layers/regularization/squeeze_excite_test.py b/keras_cv/src/layers/regularization/squeeze_excite_test.py
index a0492381fa..f38a4baca5 100644
--- a/keras_cv/src/layers/regularization/squeeze_excite_test.py
+++ b/keras_cv/src/layers/regularization/squeeze_excite_test.py
@@ -25,7 +25,7 @@ def test_maintains_shape(self):
         layer = SqueezeAndExcite2D(8, 2)
         outputs = layer(inputs)
-        self.assertEquals(inputs.shape, outputs.shape)
+        self.assertEqual(inputs.shape, outputs.shape)
 
     def test_custom_activation(self):
         def custom_activation(x):
@@ -41,7 +41,7 @@ def custom_activation(x):
             excite_activation=custom_activation,
         )
         outputs = layer(inputs)
-        self.assertEquals(inputs.shape, outputs.shape)
+        self.assertEqual(inputs.shape, outputs.shape)
 
     def test_raises_invalid_ratio_error(self):
         with self.assertRaisesRegex(
diff --git a/keras_cv/src/layers/spatial_pyramid_test.py b/keras_cv/src/layers/spatial_pyramid_test.py
index e8d14c74b0..551728a643 100644
--- a/keras_cv/src/layers/spatial_pyramid_test.py
+++ b/keras_cv/src/layers/spatial_pyramid_test.py
@@ -25,7 +25,7 @@ def test_return_type_and_shape(self):
         inputs = c4
         output = layer(inputs, training=True)
-        self.assertEquals(output.shape, (2, 16, 16, 256))
+        self.assertEqual(output.shape, (2, 16, 16, 256))
 
     def test_with_keras_tensor(self):
         layer = SpatialPyramidPooling(dilation_rates=[6, 12, 18])
@@ -33,4 +33,4 @@ def test_with_keras_tensor(self):
         inputs = c4
         output = layer(inputs, training=True)
-        self.assertEquals(output.shape, (None, 16, 16, 256))
+        self.assertEqual(output.shape, (None, 16, 16, 256))
diff --git a/keras_cv/src/layers/transformer_encoder_test.py b/keras_cv/src/layers/transformer_encoder_test.py
index 097983453b..17ca28c6ea 100644
--- a/keras_cv/src/layers/transformer_encoder_test.py
+++ b/keras_cv/src/layers/transformer_encoder_test.py
@@ -26,14 +26,14 @@ def test_return_type_and_shape(self):
         output = layer(inputs, training=True)
         self.assertTrue(isinstance(output, tf.Tensor))
         self.assertLen(output, 1)
-        self.assertEquals(output.shape, [1, 197, 128])
+        self.assertEqual(output.shape, [1, 197, 128])
 
     def test_wrong_input_dims(self):
         layer = TransformerEncoder(project_dim=128, num_heads=2, mlp_dim=128)
         # Input dims must equal output dims because of the addition
         # of the residual to the final layer
         inputs = tf.random.normal([1, 197, 256])
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             "The input and output dimensionality must be the same, but the "
             "TransformerEncoder was provided with 256 and 128",
@@ -45,7 +45,7 @@ def test_wrong_project_dims(self):
         # Input dims must equal output dims because of the addition
         # of the residual to the final layer
         inputs = tf.random.normal([1, 197, 128])
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             "The input and output dimensionality must be the same, but the "
             "TransformerEncoder was provided with 128 and 256",
diff --git a/keras_cv/src/layers/vit_layers_test.py b/keras_cv/src/layers/vit_layers_test.py
index ab269c8403..c06bf6fcd5 100644
--- a/keras_cv/src/layers/vit_layers_test.py
+++ b/keras_cv/src/layers/vit_layers_test.py
@@ -20,14 +20,14 @@
 
 class ViTLayersTest(TestCase):
     def test_patching_wrong_patch_size(self):
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             "The patch_size cannot be a negative number. Received -16",
         ):
             PatchingAndEmbedding(project_dim=16, patch_size=-16)
 
     def test_patching_wrong_padding(self):
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             "Padding must be either 'SAME' or 'VALID', but REFLECT was passed.",
         ):
@@ -41,7 +41,7 @@ def test_patch_embedding_return_type_and_shape(self):
         output = layer(inputs)
         self.assertTrue(isinstance(output, tf.Tensor))
         self.assertLen(output, 1)
-        self.assertEquals(output.shape, [1, 197, 128])
+        self.assertEqual(output.shape, [1, 197, 128])
 
     def test_patch_embedding_interpolation(self):
         inputs = np.ones([1, 224, 224, 3])
@@ -58,7 +58,7 @@ def test_patch_embedding_interpolation(self):
         self.assertTrue(isinstance(output, tf.Tensor))
         self.assertLen(output, 1)
-        self.assertEquals(output.shape, [1, 1369, 128])
+        self.assertEqual(output.shape, [1, 1369, 128])
 
     def test_patch_embedding_interpolation_numerical(self):
         inputs = np.ones([1, 4, 4, 3])
diff --git a/keras_cv/src/models/backbones/csp_darknet/csp_darknet_backbone_test.py b/keras_cv/src/models/backbones/csp_darknet/csp_darknet_backbone_test.py
index 099db3064d..1ec49c962d 100644
--- a/keras_cv/src/models/backbones/csp_darknet/csp_darknet_backbone_test.py
+++ b/keras_cv/src/models/backbones/csp_darknet/csp_darknet_backbone_test.py
@@ -123,20 +123,20 @@ def test_feature_pyramid_inputs(self):
         inputs = keras.Input(shape=[input_size, input_size, 3])
         outputs = backbone_model(inputs)
         levels = ["P2", "P3", "P4", "P5"]
-        self.assertEquals(list(outputs.keys()), levels)
-        self.assertEquals(
+        self.assertEqual(list(outputs.keys()), levels)
+        self.assertEqual(
             outputs["P2"].shape,
             (None, input_size // 2**2, input_size // 2**2, 128),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P3"].shape,
             (None, input_size // 2**3, input_size // 2**3, 256),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P4"].shape,
             (None, input_size // 2**4, input_size // 2**4, 512),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P5"].shape,
             (None, input_size // 2**5, input_size // 2**5, 1024),
         )
diff --git a/keras_cv/src/models/backbones/densenet/densenet_backbone_test.py b/keras_cv/src/models/backbones/densenet/densenet_backbone_test.py
index dd160d035e..1f4924c1a1 100644
--- a/keras_cv/src/models/backbones/densenet/densenet_backbone_test.py
+++ b/keras_cv/src/models/backbones/densenet/densenet_backbone_test.py
@@ -105,20 +105,20 @@ def test_feature_pyramid_inputs(self):
         inputs = keras.Input(shape=[input_size, input_size, 3])
         outputs = backbone_model(inputs)
         levels = ["P2", "P3", "P4", "P5"]
-        self.assertEquals(list(outputs.keys()), levels)
-        self.assertEquals(
+        self.assertEqual(list(outputs.keys()), levels)
+        self.assertEqual(
             outputs["P2"].shape,
             (None, input_size // 2**2, input_size // 2**2, 256),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P3"].shape,
             (None, input_size // 2**3, input_size // 2**3, 512),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P4"].shape,
             (None, input_size // 2**4, input_size // 2**4, 1024),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P5"].shape,
             (None, input_size // 2**5, input_size // 2**5, 1024),
         )
diff --git a/keras_cv/src/models/backbones/efficientnet_lite/efficientnet_lite_backbone_presets_test.py b/keras_cv/src/models/backbones/efficientnet_lite/efficientnet_lite_backbone_presets_test.py
index f581dd3962..b44af253a4 100644
--- a/keras_cv/src/models/backbones/efficientnet_lite/efficientnet_lite_backbone_presets_test.py
+++ b/keras_cv/src/models/backbones/efficientnet_lite/efficientnet_lite_backbone_presets_test.py
@@ -55,6 +55,6 @@ def test_efficientnetlite_feature_extractor(self):
         inputs = keras.Input(shape=[256, 256, 3])
         outputs = backbone_model(inputs)
         self.assertLen(outputs, 2)
-        self.assertEquals(list(outputs.keys()), levels)
-        self.assertEquals(outputs["P3"].shape[:3], (None, 32, 32))
-        self.assertEquals(outputs["P4"].shape[:3], (None, 16, 16))
+        self.assertEqual(list(outputs.keys()), levels)
+        self.assertEqual(outputs["P3"].shape[:3], (None, 32, 32))
+        self.assertEqual(outputs["P4"].shape[:3], (None, 16, 16))
diff --git a/keras_cv/src/models/backbones/efficientnet_lite/efficientnet_lite_backbone_test.py b/keras_cv/src/models/backbones/efficientnet_lite/efficientnet_lite_backbone_test.py
index 9bbd97c827..e2e90c39f5 100644
--- a/keras_cv/src/models/backbones/efficientnet_lite/efficientnet_lite_backbone_test.py
+++ b/keras_cv/src/models/backbones/efficientnet_lite/efficientnet_lite_backbone_test.py
@@ -121,24 +121,24 @@ def test_feature_pyramid_inputs(self):
         inputs = keras.Input(shape=[input_size, input_size, 3])
         outputs = backbone_model(inputs)
         levels = ["P1", "P2", "P3", "P4", "P5"]
-        self.assertEquals(list(outputs.keys()), levels)
-        self.assertEquals(
+        self.assertEqual(list(outputs.keys()), levels)
+        self.assertEqual(
             outputs["P1"].shape,
             (None, input_size // 2**1, input_size // 2**1, 16),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P2"].shape,
             (None, input_size // 2**2, input_size // 2**2, 24),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P3"].shape,
             (None, input_size // 2**3, input_size // 2**3, 40),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P4"].shape,
             (None, input_size // 2**4, input_size // 2**4, 112),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P5"].shape,
             (None, input_size // 2**5, input_size // 2**5, 1280),
         )
diff --git a/keras_cv/src/models/backbones/efficientnet_v1/efficientnet_v1_backbone_presets_test.py b/keras_cv/src/models/backbones/efficientnet_v1/efficientnet_v1_backbone_presets_test.py
index ad1f3ebf47..a4ded09dc0 100644
--- a/keras_cv/src/models/backbones/efficientnet_v1/efficientnet_v1_backbone_presets_test.py
+++ b/keras_cv/src/models/backbones/efficientnet_v1/efficientnet_v1_backbone_presets_test.py
@@ -55,6 +55,6 @@ def test_efficientnet_feature_extractor(self):
         inputs = keras.Input(shape=[256, 256, 3])
         outputs = backbone_model(inputs)
         self.assertLen(outputs, 2)
-        self.assertEquals(list(outputs.keys()), levels)
-        self.assertEquals(outputs["P3"].shape[:3], (None, 32, 32))
-        self.assertEquals(outputs["P4"].shape[:3], (None, 16, 16))
+        self.assertEqual(list(outputs.keys()), levels)
+        self.assertEqual(outputs["P3"].shape[:3], (None, 32, 32))
+        self.assertEqual(outputs["P4"].shape[:3], (None, 16, 16))
diff --git a/keras_cv/src/models/backbones/efficientnet_v1/efficientnet_v1_backbone_test.py b/keras_cv/src/models/backbones/efficientnet_v1/efficientnet_v1_backbone_test.py
index 668e8107c7..220d90e28f 100644
--- a/keras_cv/src/models/backbones/efficientnet_v1/efficientnet_v1_backbone_test.py
+++ b/keras_cv/src/models/backbones/efficientnet_v1/efficientnet_v1_backbone_test.py
@@ -148,24 +148,24 @@ def test_feature_pyramid_inputs(self):
         inputs = keras.Input(shape=[input_size, input_size, 3])
         outputs = backbone_model(inputs)
         levels = ["P1", "P2", "P3", "P4", "P5"]
-        self.assertEquals(list(outputs.keys()), levels)
-        self.assertEquals(
+        self.assertEqual(list(outputs.keys()), levels)
+        self.assertEqual(
             outputs["P1"].shape,
             (None, input_size // 2**1, input_size // 2**1, 16),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P2"].shape,
             (None, input_size // 2**2, input_size // 2**2, 24),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P3"].shape,
             (None, input_size // 2**3, input_size // 2**3, 40),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P4"].shape,
             (None, input_size // 2**4, input_size // 2**4, 112),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P5"].shape,
             (None, input_size // 2**5, input_size // 2**5, 1280),
         )
diff --git a/keras_cv/src/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets_test.py b/keras_cv/src/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets_test.py
index 2bb9c527ca..583d0d3ef6 100644
--- a/keras_cv/src/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets_test.py
+++ b/keras_cv/src/models/backbones/efficientnet_v2/efficientnet_v2_backbone_presets_test.py
@@ -56,6 +56,6 @@ def test_efficientnet_feature_extractor(self):
         inputs = keras.Input(shape=[256, 256, 3])
         outputs = backbone_model(inputs)
         self.assertLen(outputs, 2)
-        self.assertEquals(list(outputs.keys()), levels)
-        self.assertEquals(outputs["P3"].shape[:3], (None, 32, 32))
-        self.assertEquals(outputs["P4"].shape[:3], (None, 16, 16))
+        self.assertEqual(list(outputs.keys()), levels)
+        self.assertEqual(outputs["P3"].shape[:3], (None, 32, 32))
+        self.assertEqual(outputs["P4"].shape[:3], (None, 16, 16))
diff --git a/keras_cv/src/models/backbones/efficientnet_v2/efficientnet_v2_backbone_test.py b/keras_cv/src/models/backbones/efficientnet_v2/efficientnet_v2_backbone_test.py
index 15ffd13da0..0577d9a8bb 100644
--- a/keras_cv/src/models/backbones/efficientnet_v2/efficientnet_v2_backbone_test.py
+++ b/keras_cv/src/models/backbones/efficientnet_v2/efficientnet_v2_backbone_test.py
@@ -155,24 +155,24 @@ def test_feature_pyramid_inputs(self):
         inputs = keras.Input(shape=[input_size, input_size, 3])
         outputs = backbone_model(inputs)
         levels = ["P1", "P2", "P3", "P4", "P5"]
-        self.assertEquals(list(outputs.keys()), levels)
-        self.assertEquals(
+        self.assertEqual(list(outputs.keys()), levels)
+        self.assertEqual(
             outputs["P1"].shape,
             (None, input_size // 2**1, input_size // 2**1, 24),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P2"].shape,
             (None, input_size // 2**2, input_size // 2**2, 48),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P3"].shape,
             (None, input_size // 2**3, input_size // 2**3, 64),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P4"].shape,
             (None, input_size // 2**4, input_size // 2**4, 160),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P5"].shape,
             (None, input_size // 2**5, input_size // 2**5, 1280),
         )
diff --git a/keras_cv/src/models/backbones/mobilenet_v3/mobilenet_v3_backbone_test.py b/keras_cv/src/models/backbones/mobilenet_v3/mobilenet_v3_backbone_test.py
index 40959e68fe..4efa087dfa 100644
--- a/keras_cv/src/models/backbones/mobilenet_v3/mobilenet_v3_backbone_test.py
+++ b/keras_cv/src/models/backbones/mobilenet_v3/mobilenet_v3_backbone_test.py
@@ -77,24 +77,24 @@ def test_feature_pyramid_inputs(self):
         inputs = keras.Input(shape=[input_size, input_size, 3])
         outputs = backbone_model(inputs)
         levels = ["P1", "P2", "P3", "P4", "P5"]
-        self.assertEquals(list(outputs.keys()), levels)
-        self.assertEquals(
+        self.assertEqual(list(outputs.keys()), levels)
+        self.assertEqual(
             outputs["P1"].shape,
             (None, input_size // 2**1, input_size // 2**1, 16),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P2"].shape,
             (None, input_size // 2**2, input_size // 2**2, 16),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P3"].shape,
             (None, input_size // 2**3, input_size // 2**3, 24),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P4"].shape,
             (None, input_size // 2**4, input_size // 2**4, 48),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P5"].shape,
             (None, input_size // 2**5, input_size // 2**5, 96),
         )
diff --git a/keras_cv/src/models/backbones/resnet_v1/resnet_v1_backbone_test.py b/keras_cv/src/models/backbones/resnet_v1/resnet_v1_backbone_test.py
index a49d16d620..1aa298d1b3 100644
--- a/keras_cv/src/models/backbones/resnet_v1/resnet_v1_backbone_test.py
+++ b/keras_cv/src/models/backbones/resnet_v1/resnet_v1_backbone_test.py
@@ -123,20 +123,20 @@ def test_feature_pyramid_inputs(self):
         inputs = keras.Input(shape=[input_size, input_size, 3])
         outputs = backbone_model(inputs)
         levels = ["P2", "P3", "P4", "P5"]
-        self.assertEquals(list(outputs.keys()), levels)
-        self.assertEquals(
+        self.assertEqual(list(outputs.keys()), levels)
+        self.assertEqual(
             outputs["P2"].shape,
             (None, input_size // 2**2, input_size // 2**2, 256),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P3"].shape,
             (None, input_size // 2**3, input_size // 2**3, 512),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P4"].shape,
             (None, input_size // 2**4, input_size // 2**4, 1024),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P5"].shape,
             (None, input_size // 2**5, input_size // 2**5, 2048),
         )
diff --git a/keras_cv/src/models/backbones/resnet_v2/resnet_v2_backbone_test.py b/keras_cv/src/models/backbones/resnet_v2/resnet_v2_backbone_test.py
index 9c47f97aae..a501c75a76 100644
--- a/keras_cv/src/models/backbones/resnet_v2/resnet_v2_backbone_test.py
+++ b/keras_cv/src/models/backbones/resnet_v2/resnet_v2_backbone_test.py
@@ -113,20 +113,20 @@ def test_feature_pyramid_inputs(self):
         inputs = keras.Input(shape=[input_size, input_size, 3])
         outputs = backbone_model(inputs)
         levels = ["P2", "P3", "P4", "P5"]
-        self.assertEquals(list(outputs.keys()), levels)
-        self.assertEquals(
+        self.assertEqual(list(outputs.keys()), levels)
+        self.assertEqual(
             outputs["P2"].shape,
             (None, input_size // 2**2, input_size // 2**2, 256),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P3"].shape,
             (None, input_size // 2**3, input_size // 2**3, 512),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P4"].shape,
             (None, input_size // 2**4, input_size // 2**4, 1024),
         )
-        self.assertEquals(
+        self.assertEqual(
             outputs["P5"].shape,
             (None, input_size // 2**5, input_size // 2**5, 2048),
         )
diff --git a/keras_cv/src/models/object_detection/faster_rcnn/feature_pyamid_test.py b/keras_cv/src/models/object_detection/faster_rcnn/feature_pyamid_test.py
index 7292a1837d..d2750ef6be 100644
--- a/keras_cv/src/models/object_detection/faster_rcnn/feature_pyamid_test.py
+++ b/keras_cv/src/models/object_detection/faster_rcnn/feature_pyamid_test.py
@@ -33,7 +33,7 @@ def test_return_type_dict(self):
         inputs = {"P2": c2, "P3": c3, "P4": c4, "P5": c5}
         output = layer(inputs)
         self.assertTrue(isinstance(output, dict))
-        self.assertEquals(sorted(output.keys()), ["P2", "P3", "P4", "P5", "P6"])
+        self.assertEqual(sorted(output.keys()), ["P2", "P3", "P4", "P5", "P6"])
 
     @pytest.mark.skipif(not keras_3(), reason="disabling test for Keras 2")
     def test_result_shapes(self):
@@ -46,9 +46,9 @@ def test_result_shapes(self):
         inputs = {"P2": c2, "P3": c3, "P4": c4, "P5": c5}
         output = layer(inputs)
         for level in inputs.keys():
-            self.assertEquals(output[level].shape[1], inputs[level].shape[1])
-            self.assertEquals(output[level].shape[2], inputs[level].shape[2])
-            self.assertEquals(output[level].shape[3], layer.num_channels)
+            self.assertEqual(output[level].shape[1], inputs[level].shape[1])
+            self.assertEqual(output[level].shape[2], inputs[level].shape[2])
+            self.assertEqual(output[level].shape[3], layer.num_channels)
 
         # Test with different resolution and channel size
         c2 = np.ones([2, 64, 128, 4])
@@ -60,9 +60,9 @@ def test_result_shapes(self):
         layer = FeaturePyramid(min_level=2, max_level=5)
         output = layer(inputs)
         for level in inputs.keys():
-            self.assertEquals(output[level].shape[1], inputs[level].shape[1])
-            self.assertEquals(output[level].shape[2], inputs[level].shape[2])
-            self.assertEquals(output[level].shape[3], layer.num_channels)
+            self.assertEqual(output[level].shape[1], inputs[level].shape[1])
+            self.assertEqual(output[level].shape[2], inputs[level].shape[2])
+            self.assertEqual(output[level].shape[3], layer.num_channels)
 
     @pytest.mark.skipif(not keras_3(), reason="disabling test for Keras 2")
     def test_with_keras_input_tensor(self):
@@ -76,14 +76,14 @@ def test_with_keras_input_tensor(self):
         inputs = {"P2": c2, "P3": c3, "P4": c4, "P5": c5}
         output = layer(inputs)
         for level in inputs.keys():
-            self.assertEquals(output[level].shape[1], inputs[level].shape[1])
-            self.assertEquals(output[level].shape[2], inputs[level].shape[2])
-            self.assertEquals(output[level].shape[3], layer.num_channels)
+            self.assertEqual(output[level].shape[1], inputs[level].shape[1])
+            self.assertEqual(output[level].shape[2], inputs[level].shape[2])
+            self.assertEqual(output[level].shape[3], layer.num_channels)
 
     @pytest.mark.skipif(not keras_3(), reason="disabling test for Keras 2")
     def test_invalid_lateral_layers(self):
         lateral_layers = [keras.layers.Conv2D(256, 1)] * 3
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "Expect lateral_layers to be a dict"
         ):
             _ = FeaturePyramid(
@@ -94,7 +94,7 @@ def test_invalid_lateral_layers(self):
             "P3": keras.layers.Conv2D(256, 1),
             "P4": keras.layers.Conv2D(256, 1),
         }
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "with keys as .* ['P2', 'P3', 'P4', 'P5']"
         ):
             _ = FeaturePyramid(
@@ -104,7 +104,7 @@
     @pytest.mark.skipif(not keras_3(), reason="disabling test for Keras 2")
     def test_invalid_output_layers(self):
         output_layers = [keras.layers.Conv2D(256, 3)] * 3
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "Expect output_layers to be a dict"
         ):
             _ = FeaturePyramid(
@@ -115,7 +115,7 @@
             "P3": keras.layers.Conv2D(256, 3),
             "P4": keras.layers.Conv2D(256, 3),
         }
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "with keys as .* ['P2', 'P3', 'P4', 'P5']"
         ):
             _ = FeaturePyramid(
@@ -134,13 +134,13 @@ def test_invalid_input_features(self):
         # Build required for Keas 3
         _ = layer(inputs)
         list_input = [c2, c3, c4, c5]
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "expects input features to be a dict"
         ):
             layer(list_input)
 
         dict_input_with_missing_feature = {"P2": c2, "P3": c3, "P4": c4}
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError, "Expect feature keys.*['P2', 'P3', 'P4', 'P5']"
         ):
             layer(dict_input_with_missing_feature)
diff --git a/keras_cv/src/models/object_detection/faster_rcnn/rpn_head_test.py b/keras_cv/src/models/object_detection/faster_rcnn/rpn_head_test.py
index 56a11af706..d23d1aa915 100644
--- a/keras_cv/src/models/object_detection/faster_rcnn/rpn_head_test.py
+++ b/keras_cv/src/models/object_detection/faster_rcnn/rpn_head_test.py
@@ -38,10 +38,10 @@ def test_return_type_dict(
         rpn_boxes, rpn_scores = layer(inputs)
         self.assertTrue(isinstance(rpn_boxes, dict))
         self.assertTrue(isinstance(rpn_scores, dict))
-        self.assertEquals(
+        self.assertEqual(
             sorted(rpn_boxes.keys()), ["P2", "P3", "P4", "P5", "P6"]
         )
-        self.assertEquals(
+        self.assertEqual(
             sorted(rpn_scores.keys()), ["P2", "P3", "P4", "P5", "P6"]
         )
@@ -75,15 +75,15 @@ def test_with_keras_input_tensor_and_num_anchors(self, num_anchors):
         inputs = {"P2": c2, "P3": c3, "P4": c4, "P5": c5, "P6": c6}
         rpn_boxes, rpn_scores = layer(inputs)
         for level in inputs.keys():
-            self.assertEquals(rpn_boxes[level].shape[1], inputs[level].shape[1])
-            self.assertEquals(rpn_boxes[level].shape[2], inputs[level].shape[2])
-            self.assertEquals(rpn_boxes[level].shape[3], layer.num_anchors * 4)
+            self.assertEqual(rpn_boxes[level].shape[1], inputs[level].shape[1])
+            self.assertEqual(rpn_boxes[level].shape[2], inputs[level].shape[2])
+            self.assertEqual(rpn_boxes[level].shape[3], layer.num_anchors * 4)
 
         for level in inputs.keys():
-            self.assertEquals(
+            self.assertEqual(
                 rpn_scores[level].shape[1], inputs[level].shape[1]
             )
-            self.assertEquals(
+            self.assertEqual(
                 rpn_scores[level].shape[2], inputs[level].shape[2]
             )
-            self.assertEquals(rpn_scores[level].shape[3], layer.num_anchors * 1)
+            self.assertEqual(rpn_scores[level].shape[3], layer.num_anchors * 1)
diff --git a/keras_cv/src/models/object_detection/yolox/layers/yolox_label_encoder_test.py b/keras_cv/src/models/object_detection/yolox/layers/yolox_label_encoder_test.py
index 8bc5660185..0c7c87b583 100644
--- a/keras_cv/src/models/object_detection/yolox/layers/yolox_label_encoder_test.py
+++ b/keras_cv/src/models/object_detection/yolox/layers/yolox_label_encoder_test.py
@@ -32,7 +32,7 @@ def test_ragged_images_exception(self):
         )
         layer = YoloXLabelEncoder()
-        with self.assertRaisesRegexp(
+        with self.assertRaisesRegex(
             ValueError,
             "method does not support RaggedTensor inputs for the `images` "
             "argument.",