1616except :
1717 from scikeras .wrappers import KerasClassifier , KerasRegressor
1818import tensorflow as tf
19- import tensorflow .keras .backend as K
2019from tensorflow .keras import Sequential , Model
2120from tensorflow .keras .layers import Layer , Dense , Flatten , Input
2221from tensorflow .keras .models import clone_model
@@ -88,24 +87,25 @@ def accuracy(y_true, y_pred):
8887 Boolean Tensor
8988 """
9089 # TODO: accuracy can't handle 1D ys.
91- multi_columns_t = K .cast (K .greater (K .shape (y_true )[1 ], 1 ),
92- "float32" )
93- binary_t = K .reshape (K .sum (K .cast (K .greater (y_true , 0.5 ),
94- "float32" ), axis = - 1 ), (- 1 ,))
95- multi_t = K .reshape (K .cast (K .argmax (y_true , axis = - 1 ),
96- "float32" ), (- 1 ,))
90+ dtype = y_pred .dtype
91+ multi_columns_t = tf .cast (tf .greater (tf .shape (y_true )[1 ], 1 ),
92+ dtype )
93+ binary_t = tf .reshape (tf .reduce_sum (tf .cast (tf .greater (y_true , 0.5 ),
94+ dtype ), axis = - 1 ), (- 1 ,))
95+ multi_t = tf .reshape (tf .cast (tf .math .argmax (y_true , axis = - 1 ),
96+ dtype ), (- 1 ,))
9797 y_true = ((1 - multi_columns_t ) * binary_t +
9898 multi_columns_t * multi_t )
9999
100- multi_columns_p = K .cast (K .greater (K .shape (y_pred )[1 ], 1 ),
101- "float32" )
102- binary_p = K .reshape (K . sum ( K .cast (K .greater (y_pred , 0.5 ),
103- "float32" ), axis = - 1 ), (- 1 ,))
104- multi_p = K .reshape (K .cast (K .argmax (y_pred , axis = - 1 ),
105- "float32" ), (- 1 ,))
100+ multi_columns_p = tf .cast (tf .greater (tf .shape (y_pred )[1 ], 1 ),
101+ dtype )
102+ binary_p = tf .reshape (tf . reduce_sum ( tf .cast (tf .greater (y_pred , 0.5 ),
103+ dtype ), axis = - 1 ), (- 1 ,))
104+ multi_p = tf .reshape (tf .cast (tf . math .argmax (y_pred , axis = - 1 ),
105+ dtype ), (- 1 ,))
106106 y_pred = ((1 - multi_columns_p ) * binary_p +
107- multi_columns_p * multi_p )
108- return tf .keras . metrics . get ( "acc" )( y_true , y_pred )
107+ multi_columns_p * multi_p )
108+ return tf .cast ( tf . math . equal ( y_true , y_pred ), dtype )
109109
110110
111111def predict (self , x , ** kwargs ):
@@ -259,11 +259,11 @@ def check_network(network, copy=True,
259259 # but no input_shape
260260 if hasattr (network , "input_shape" ):
261261 shape = network .input_shape [1 :]
262- new_network = clone_model (network , input_tensors = Input ( shape ) )
262+ new_network = clone_model (network )
263263 new_network .set_weights (network .get_weights ())
264264 elif network .built :
265265 shape = network ._build_input_shape [1 :]
266- new_network = clone_model (network , input_tensors = Input ( shape ) )
266+ new_network = clone_model (network )
267267 new_network .set_weights (network .get_weights ())
268268 else :
269269 new_network = clone_model (network )
@@ -284,7 +284,7 @@ def check_network(network, copy=True,
284284 new_network ._name = name
285285
286286 # Override the predict method to speed the prediction for small dataset
287- new_network .predict = predict .__get__ (new_network )
287+ # new_network.predict = predict.__get__(new_network)
288288 return new_network
289289
290290
@@ -366,62 +366,6 @@ def get_default_discriminator(name=None, state=None):
366366 return model
367367
368368
369- @tf .custom_gradient
370- def _grad_handler (x , lambda_ ):
371- y = tf .identity (x )
372- def custom_grad (dy ):
373- return (lambda_ * dy , 0. * lambda_ )
374- return y , custom_grad
375-
376- class GradientHandler (Layer ):
377- """
378- Multiply gradients with a scalar during backpropagation.
379-
380- Act as identity in forward step.
381-
382- Parameters
383- ----------
384- lambda_init : float (default=1.)
385- Scalar multiplier
386- """
387- def __init__ (self , lambda_init = 1. , name = "g_handler" ):
388- super ().__init__ (name = name )
389- self .lambda_init = lambda_init
390- self .lambda_ = tf .Variable (lambda_init ,
391- trainable = False ,
392- dtype = "float32" )
393-
394- def call (self , x ):
395- """
396- Call gradient handler.
397-
398- Parameters
399- ----------
400- x: object
401- Inputs
402-
403- Returns
404- -------
405- x, custom gradient function
406- """
407- return _grad_handler (x , self .lambda_ )
408-
409-
410- def get_config (self ):
411- """
412- Return config dictionnary.
413-
414- Returns
415- -------
416- dict
417- """
418- config = super ().get_config ().copy ()
419- config .update ({
420- 'lambda_init' : self .lambda_init
421- })
422- return config
423-
424-
425369def make_classification_da (n_samples = 100 ,
426370 n_features = 2 ,
427371 random_state = 2 ):
@@ -638,8 +582,18 @@ def check_fitted_network(estimator):
638582 if isinstance (estimator , Model ):
639583 estimator .__deepcopy__ = __deepcopy__ .__get__ (estimator )
640584 return estimator
641-
642-
585+
586+
def check_if_compiled(network):
    """
    Check whether a Keras network has already been compiled.

    Handles both modern and legacy Keras attribute names: newer versions
    expose a public ``compiled`` flag, while older ones use the private
    ``_is_compiled`` attribute.

    Parameters
    ----------
    network : tensorflow.keras.Model
        The network to inspect.

    Returns
    -------
    bool
        True if the network is compiled, False otherwise.
    """
    if hasattr(network, "compiled") and network.compiled:
        return True
    # Fix: the original referenced the undefined name ``networtf`` here,
    # which raised NameError whenever this legacy branch was reached.
    elif hasattr(network, "_is_compiled") and network._is_compiled:
        return True
    else:
        return False
643597
644598# Try to save the initial estimator if it is a Keras Model
645599# This is required for cloning the adapt method.
0 commit comments