Mirror of https://github.com/google-deepmind/deepmind-research.git (synced 2026-02-06 03:32:18 +08:00)
Update requirements versions, change tf.compat.v1. usages to tf.

PiperOrigin-RevId: 288659456

Committed by: Diego de Las Casas
parent a506a4274e
commit 2160fc7f17
@@ -71,10 +71,10 @@ def evaluate(crop_size_x, crop_size_y, feature_normalization, checkpoint_path,
       normalization_exclusion=normalization_exclusion)

   checkpoint = snt.get_saver(experiment.model, collections=[
-      tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
-      tf.compat.v1.GraphKeys.MOVING_AVERAGE_VARIABLES])
+      tf.GraphKeys.GLOBAL_VARIABLES,
+      tf.GraphKeys.MOVING_AVERAGE_VARIABLES])

-  with tf.compat.v1.train.SingularMonitoredSession(hooks=[]) as sess:
+  with tf.train.SingularMonitoredSession(hooks=[]) as sess:
     logging.info('Restoring from checkpoint %s', checkpoint_path)
     checkpoint.restore(sess, checkpoint_path)
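Note: with tensorflow pinned to 1.14 (see the requirements hunk below), the shortened tf.* names resolve to the same objects as their tf.compat.v1.* spellings, so this rename is behavior-preserving. A quick sanity check one could run, assuming TF 1.14:

    import tensorflow as tf

    assert tf.GraphKeys is tf.compat.v1.GraphKeys
    assert tf.train.SingularMonitoredSession is tf.compat.v1.train.SingularMonitoredSession
    assert tf.placeholder is tf.compat.v1.placeholder
    assert tf.get_variable is tf.compat.v1.get_variable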
@@ -167,7 +167,7 @@ def parse_tfexample(raw_data, features):
   for k, v in parsed_features.items():
     new_shape = shape(feature_name=k, num_residues=num_residues)
     # Make sure the feature we are reshaping is not empty.
-    assert_non_empty = tf.compat.v1.assert_greater(
+    assert_non_empty = tf.assert_greater(
         tf.size(v), 0, name='assert_%s_non_empty' % k,
         message='The feature %s is not set in the tf.Example. Either do not '
         'request the feature or use a tf.Example that has the feature set.' % k)
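In TF 1.x graph mode an assertion op only runs if something depends on it, so the op built here is presumably attached via a control dependency before the reshape. A hypothetical sketch of that pattern with a toy tensor:

    import tensorflow as tf

    v = tf.constant([1.0, 2.0, 3.0])
    assert_non_empty = tf.assert_greater(
        tf.size(v), 0, message='The feature is not set in the tf.Example.')
    with tf.control_dependencies([assert_non_empty]):
      # The reshape is only evaluated after the assertion passes.
      v = tf.reshape(v, [3, 1])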
@@ -245,7 +245,7 @@ def normalize_from_stats_file(
       train_mean = tf.cast(norm_stats['mean'][key], dtype=tf.float32)
       train_range = tf.sqrt(tf.cast(norm_stats['var'][key], dtype=tf.float32))
       value -= train_mean
-      value = tf.compat.v1.where(
+      value = tf.where(
          train_range > range_epsilon, value / train_range, value)
       features[key] = value
     else:
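The tf.where guard leaves features whose training range is below range_epsilon unscaled, so near-constant statistics cannot blow up the division. A toy illustration (invented values, TF 1.x):

    import tensorflow as tf

    value = tf.constant([2.0, 4.0])
    train_range = tf.constant([0.0, 2.0])  # first stat has (near-)zero range
    range_epsilon = 1e-6
    value = tf.where(train_range > range_epsilon, value / train_range, value)
    with tf.Session() as sess:
      print(sess.run(value))  # [2. 2.]: first entry passed through, second scaled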
@@ -22,12 +22,12 @@ from alphafold_casp13 import contacts_network


 def _int_ph(shape, name):
-  return tf.compat.v1.placeholder(
+  return tf.placeholder(
       dtype=tf.int32, shape=shape, name=('%s_placeholder' % name))


 def _float_ph(shape, name):
-  return tf.compat.v1.placeholder(
+  return tf.placeholder(
       dtype=tf.float32, shape=shape, name=('%s_placeholder' % name))

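These helpers build graph-mode inputs that are fed at session run time; a hedged usage sketch with toy shapes (assumes the module's TF import):

    x = _float_ph(shape=[None, 3], name='features')
    total = tf.reduce_sum(x)
    with tf.Session() as sess:
      print(sess.run(total, feed_dict={x: [[1.0, 2.0, 3.0]]}))  # 6.0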
@@ -102,7 +102,7 @@ class Contacts(object):
     dataset = dataset.batch(1)

     # Get a batch of tensors in the legacy ProteinsDataset format.
-    iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
+    iterator = tf.data.make_one_shot_iterator(dataset)
     self._input_batch = iterator.get_next()

     self.num_eval_examples = sum(
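A one-shot iterator walks the dataset exactly once and needs no explicit initializer, which suits this single-pass evaluation setup. A minimal sketch with a toy dataset standing in for the ProteinsDataset (TF 1.x):

    dataset = tf.data.Dataset.from_tensor_slices([10, 20, 30]).batch(1)
    iterator = tf.data.make_one_shot_iterator(dataset)
    batch = iterator.get_next()
    with tf.Session() as sess:
      print(sess.run(batch))  # [10]
      print(sess.run(batch))  # [20]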
@@ -41,7 +41,7 @@ def call_on_tuple(f):


 class ContactsNet(sonnet.AbstractModule):
-  """A network to go from sequence to secondary structure."""
+  """A network to go from sequence to distance histograms."""

   def __init__(self,
                binary_code_bits,
@@ -102,7 +102,7 @@ class ContactsNet(sonnet.AbstractModule):
     if self.asa_multiplier > 0:
       self._asa = asa_output.ASAOutputLayer()
     if self._position_specific_bias_size:
-      self._position_specific_bias = tf.compat.v1.get_variable(
+      self._position_specific_bias = tf.get_variable(
          'position_specific_bias',
          [self._position_specific_bias_size, self._num_bins or 1],
          initializer=tf.zeros_initializer())
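The `self._num_bins or 1` in the variable shape falls back to a single column when the bin count is unset (the 32 below is a made-up bias size):

    num_bins = None
    print([32, num_bins or 1])  # [32, 1] -- one column when bins are unset
    num_bins = 64
    print([32, num_bins or 1])  # [32, 64]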
@@ -338,7 +338,7 @@ class ContactsNet(sonnet.AbstractModule):
     layers_forward = None
     if config_2d_deep.extra_blocks:
       # Optionally put some extra double-size blocks at the beginning.
-      with tf.compat.v1.variable_scope('Deep2DExtra'):
+      with tf.variable_scope('Deep2DExtra'):
         hidden_2d = two_dim_resnet.make_two_dim_resnet(
             input_node=hidden_2d,
             num_residues=None,  # Unused
@@ -362,7 +362,7 @@ class ContactsNet(sonnet.AbstractModule):
     if features_forward is not None:
       hidden_2d = tf.concat([hidden_2d, features_forward], 1
                             if data_format == 'NCHW' else 3)
-    with tf.compat.v1.variable_scope('Deep2D'):
+    with tf.variable_scope('Deep2D'):
       logging.info('2d hidden shape is %s', str(hidden_2d.shape.as_list()))
       contact_pre_logits = two_dim_resnet.make_two_dim_resnet(
           input_node=hidden_2d,
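The concat axis selects the channel dimension for the tensor layout in use: dimension 1 under NCHW, dimension 3 under NHWC. Illustrative shapes with toy sizes:

    a = tf.zeros([1, 4, 8, 8])  # NCHW: batch, channels, height, width
    b = tf.zeros([1, 2, 8, 8])
    print(tf.concat([a, b], 1).shape)  # (1, 6, 8, 8) -- channels stacked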
@@ -1,6 +1,7 @@
-absl-py>=0.8.0
-numpy>=1.16
-six>=1.12
-dm-sonnet>=1.35
+setuptools==41.0.0
+absl-py==0.8.1
+numpy==1.16
+six==1.12
+dm-sonnet==1.35
 tensorflow==1.14
 tensorflow-probability==0.7.0
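Pinning exact versions with == (rather than >= lower bounds) keeps the environment reproducible against the TF 1.14 API used throughout; the file is consumed in the usual way via pip install -r requirements.txt.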
@@ -55,7 +55,7 @@ class Secstruct(object):

   def make_layer_new(self, activations):
     """Make the layer."""
-    with tf.compat.v1.variable_scope(self.name, reuse=tf.AUTO_REUSE):
+    with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
       logging.info('Creating secstruct %s', activations)
       self.logits = contrib_layers.linear(activations, self._dimension)
       self.ss_q8_probs = tf.nn.softmax(self.logits)
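reuse=tf.AUTO_REUSE lets make_layer_new be called more than once while sharing a single set of weights; a minimal sketch of the mechanism (TF 1.x, hypothetical scope name):

    def build(name):
      with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        return tf.get_variable('w', [3], initializer=tf.zeros_initializer())

    w1 = build('secstruct')
    w2 = build('secstruct')
    assert w1 is w2  # the second call reuses the variable instead of raising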
@@ -22,12 +22,12 @@ from tensorflow.contrib import layers as contrib_layers
 def weight_variable(shape, stddev=0.01):
   """Returns the weight variable."""
   logging.vlog(1, 'weight init for shape %s', str(shape))
-  return tf.compat.v1.get_variable(
+  return tf.get_variable(
       'w', shape, initializer=tf.random_normal_initializer(stddev=stddev))


 def bias_variable(shape):
-  return tf.compat.v1.get_variable(
+  return tf.get_variable(
       'b', shape, initializer=tf.zeros_initializer())

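Because these helpers hard-code the variable names 'w' and 'b', each call site must live in its own variable_scope to avoid name collisions; for example:

    with tf.variable_scope('conv1'):  # hypothetical layer scope
      w = weight_variable([3, 3, 4, 8])
      b = bias_variable([8])
    print(w.name, b.name)  # conv1/w:0 conv1/b:0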
@@ -61,12 +61,12 @@ def make_conv_sep2d_layer(input_node,
     filter_size_2 = filter_size
   logging.vlog(1, 'layer %s in %d out %d chan mult %d', layer_name, in_channels,
                out_channels, channel_multiplier)
-  with tf.compat.v1.variable_scope(layer_name):
-    with tf.compat.v1.variable_scope('depthwise'):
+  with tf.variable_scope(layer_name):
+    with tf.variable_scope('depthwise'):
       w_depthwise = weight_variable(
           [filter_size, filter_size_2, in_channels, channel_multiplier],
           stddev=stddev)
-    with tf.compat.v1.variable_scope('pointwise'):
+    with tf.variable_scope('pointwise'):
       w_pointwise = weight_variable(
           [1, 1, in_channels * channel_multiplier, out_channels], stddev=stddev)
     h_conv = tf.nn.separable_conv2d(
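A separable convolution factors a full convolution into a per-channel depthwise pass followed by a 1x1 pointwise mix, which is what the two weight tensors above parameterize. A shape sketch with toy sizes (TF 1.x, NHWC):

    x = tf.zeros([1, 8, 8, 4])                 # in_channels = 4
    w_depthwise = tf.zeros([3, 3, 4, 2])       # channel_multiplier = 2
    w_pointwise = tf.zeros([1, 1, 4 * 2, 16])  # out_channels = 16
    h = tf.nn.separable_conv2d(x, w_depthwise, w_pointwise,
                               strides=[1, 1, 1, 1], padding='SAME')
    print(h.shape)  # (1, 8, 8, 16)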
@@ -119,7 +119,7 @@ def make_conv_layer(input_node,
     filter_size_2 = filter_size
   logging.vlog(
       1, 'layer %s in %d out %d', layer_name, in_channels, out_channels)
-  with tf.compat.v1.variable_scope(layer_name):
+  with tf.variable_scope(layer_name):
     w_conv = weight_variable(
         [filter_size, filter_size_2, in_channels, out_channels], stddev=stddev)
     h_conv = conv2d(