Mirror of https://github.com/TheAlgorithms/Python.git, synced 2025-04-04 12:56:45 +00:00
Reenable files when TensorFlow supports the current Python (#8602)
* Remove python_version < "3.11" for tensorflow
* Reenable neural_network/input_data.py_tf
* updating DIRECTORY.md
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* Try to fix ruff
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* Try to fix ruff
* Try to fix ruff
* Try to fix ruff
* Try to fix pre-commit
* Try to fix
* Fix
* Fix
* Reenable dynamic_programming/k_means_clustering_tensorflow.py_tf
* updating DIRECTORY.md
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* Try to fix ruff

---------

Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
This commit is contained in:
parent 84b6852de8
commit 56a40eb3ee
diff --git a/DIRECTORY.md b/DIRECTORY.md
@@ -309,6 +309,7 @@
   * [Floyd Warshall](dynamic_programming/floyd_warshall.py)
   * [Integer Partition](dynamic_programming/integer_partition.py)
   * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py)
+  * [K Means Clustering Tensorflow](dynamic_programming/k_means_clustering_tensorflow.py)
   * [Knapsack](dynamic_programming/knapsack.py)
   * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py)
   * [Longest Common Substring](dynamic_programming/longest_common_substring.py)
@@ -685,6 +686,7 @@
   * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py)
   * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py)
   * [Convolution Neural Network](neural_network/convolution_neural_network.py)
+  * [Input Data](neural_network/input_data.py)
   * [Perceptron](neural_network/perceptron.py)
   * [Simple Neural Network](neural_network/simple_neural_network.py)
diff --git a/dynamic_programming/k_means_clustering_tensorflow.py_tf b/dynamic_programming/k_means_clustering_tensorflow.py
@@ -1,9 +1,10 @@
-import tensorflow as tf
 from random import shuffle
+
+import tensorflow as tf
 from numpy import array
 
 
-def TFKMeansCluster(vectors, noofclusters):
+def tf_k_means_cluster(vectors, noofclusters):
     """
     K-Means Clustering using TensorFlow.
     'vectors' should be a n*k 2-D NumPy array, where n is the number
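The rename from `TFKMeansCluster` to `tf_k_means_cluster` follows PEP 8 snake_case naming for functions (ruff's pep8-naming check, N802); the import reshuffle groups third-party imports after the standard library. A minimal usage sketch, assuming the module is importable under this path and that the function returns centroids plus per-vector assignments as in the original script:

    import numpy as np

    from dynamic_programming.k_means_clustering_tensorflow import (
        tf_k_means_cluster,
    )

    vectors = np.array([[1.0, 1.0], [1.2, 0.8], [8.0, 9.0], [8.5, 9.5]])
    # Same call as before the rename, new snake_case spelling:
    centroids, assignments = tf_k_means_cluster(vectors, noofclusters=2)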
@@ -30,7 +31,6 @@ def TFKMeansCluster(vectors, noofclusters):
     graph = tf.Graph()
 
     with graph.as_default():
-
         # SESSION OF COMPUTATION
 
         sess = tf.Session()
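`tf.Graph` and `tf.Session` are TensorFlow 1.x constructs; TensorFlow 2 removed `tf.Session` from the top-level namespace, so this graph/session pattern only runs through the compatibility layer. A hedged sketch, assuming TensorFlow 2 is installed:

    import tensorflow.compat.v1 as tf  # TF2's v1 compatibility layer

    tf.disable_v2_behavior()  # opt out of eager execution

    graph = tf.Graph()
    with graph.as_default():
        x = tf.constant([1.0, 2.0, 3.0])
        total = tf.reduce_sum(x)
        # SESSION OF COMPUTATION
        sess = tf.Session()
        print(sess.run(total))  # 6.0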
@@ -95,8 +95,7 @@ def TFKMeansCluster(vectors, noofclusters):
     # iterations. To keep things simple, we will only do a set number of
     # iterations, instead of using a Stopping Criterion.
     noofiterations = 100
-    for iteration_n in range(noofiterations):
-
+    for _ in range(noofiterations):
         ##EXPECTATION STEP
         ##Based on the centroid locations till last iteration, compute
         ##the _expected_ centroid assignments.
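Renaming `iteration_n` to `_` addresses ruff's unused-loop-variable warning (B007): the loop body never reads the counter, and `_` makes that intent explicit. A small self-contained illustration:

    noofiterations = 3
    passes = []
    for _ in range(noofiterations):  # counter value is never used
        passes.append("expectation/maximization pass")
    print(len(passes))  # 3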
diff --git a/neural_network/input_data.py_tf b/neural_network/input_data.py
@@ -21,13 +21,10 @@ This module and all its submodules are deprecated.
 import collections
 import gzip
 import os
+import urllib
 
 import numpy
-from six.moves import urllib
-from six.moves import xrange  # pylint: disable=redefined-builtin
-
-from tensorflow.python.framework import dtypes
-from tensorflow.python.framework import random_seed
+from tensorflow.python.framework import dtypes, random_seed
 from tensorflow.python.platform import gfile
 from tensorflow.python.util.deprecation import deprecated
 
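`six.moves` papered over Python 2/3 differences; with Python 2 support gone, the standard library provides `urllib.request` directly and `range` already behaves like the old lazy `xrange`. One caveat worth noting: a bare `import urllib` does not by itself load the `request` submodule, so `import urllib.request` is the safer spelling (here it works because TensorFlow's own imports pull it in). A minimal before/after sketch of the migration:

    # Before (Python 2/3 via six):
    #   from six.moves import urllib
    #   from six.moves import xrange
    # After (Python 3 only):
    import urllib.request  # replaces six.moves.urllib

    print(urllib.request.__name__)  # urllib.request
    squares = [n * n for n in range(5)]  # range replaces six.moves.xrange
    print(squares)  # [0, 1, 4, 9, 16]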
@@ -46,16 +43,16 @@ def _read32(bytestream):
 def _extract_images(f):
     """Extract the images into a 4D uint8 numpy array [index, y, x, depth].
 
     Args:
       f: A file object that can be passed into a gzip reader.
 
     Returns:
       data: A 4D uint8 numpy array [index, y, x, depth].
 
     Raises:
       ValueError: If the bytestream does not start with 2051.
 
     """
     print("Extracting", f.name)
     with gzip.GzipFile(fileobj=f) as bytestream:
         magic = _read32(bytestream)
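For context: MNIST's IDX files open with a big-endian uint32 magic number, 2051 for images and 2049 for labels, which is why `_extract_images` checks the first four bytes. A sketch of the `_read32` helper named in the hunk header, matching the classic TensorFlow MNIST loader (reproduced here as an assumption, not shown in this diff):

    import numpy

    def _read32(bytestream):
        """Read one big-endian uint32 (MNIST headers are stored as >u4)."""
        dt = numpy.dtype(numpy.uint32).newbyteorder(">")
        return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]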
@@ -86,17 +83,17 @@ def _dense_to_one_hot(labels_dense, num_classes):
 def _extract_labels(f, one_hot=False, num_classes=10):
     """Extract the labels into a 1D uint8 numpy array [index].
 
     Args:
       f: A file object that can be passed into a gzip reader.
       one_hot: Does one hot encoding for the result.
       num_classes: Number of classes for the one hot encoding.
 
     Returns:
       labels: a 1D uint8 numpy array.
 
     Raises:
       ValueError: If the bystream doesn't start with 2049.
     """
     print("Extracting", f.name)
     with gzip.GzipFile(fileobj=f) as bytestream:
         magic = _read32(bytestream)
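The `one_hot` path relies on `_dense_to_one_hot`, named in the hunk header: each integer label becomes a row with a single 1. A sketch of that conversion as it typically appears in this loader (an assumption based on the standard implementation):

    import numpy

    def _dense_to_one_hot(labels_dense, num_classes):
        """Convert class labels from scalars to one-hot vectors."""
        num_labels = labels_dense.shape[0]
        index_offset = numpy.arange(num_labels) * num_classes
        labels_one_hot = numpy.zeros((num_labels, num_classes))
        labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
        return labels_one_hot

    print(_dense_to_one_hot(numpy.array([0, 2, 1]), num_classes=3))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]]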
@@ -115,8 +112,8 @@ def _extract_labels(f, one_hot=False, num_classes=10):
 class _DataSet:
     """Container class for a _DataSet (deprecated).
 
     THIS CLASS IS DEPRECATED.
     """
 
     @deprecated(
         None,
@@ -135,21 +132,21 @@ class _DataSet:
     ):
         """Construct a _DataSet.
 
         one_hot arg is used only if fake_data is true. `dtype` can be either
         `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
         `[0, 1]`. Seed arg provides for convenient deterministic testing.
 
         Args:
           images: The images
           labels: The labels
           fake_data: Ignore inages and labels, use fake data.
           one_hot: Bool, return the labels as one hot vectors (if True) or ints (if
             False).
           dtype: Output image dtype. One of [uint8, float32]. `uint8` output has
             range [0,255]. float32 output has range [0,1].
           reshape: Bool. If True returned images are returned flattened to vectors.
           seed: The random seed to use.
         """
         seed1, seed2 = random_seed.get_seed(seed)
         # If op level seed is not set, use whatever graph level seed is returned
         numpy.random.seed(seed1 if seed is None else seed2)
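For reproducibility, `random_seed.get_seed` splits the user-supplied seed into a graph-level and an op-level seed, and the constructor seeds NumPy with whichever applies. A hedged sketch of the same pattern in isolation, assuming the TF1-era `random_seed` module is available:

    import numpy
    from tensorflow.python.framework import random_seed

    seed = 42
    seed1, seed2 = random_seed.get_seed(seed)
    # If op level seed is not set, use whatever graph level seed is returned
    numpy.random.seed(seed1 if seed is None else seed2)
    print(numpy.random.rand(2))  # same output on every run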
@@ -206,8 +203,8 @@ class _DataSet:
             else:
                 fake_label = 0
             return (
-                [fake_image for _ in xrange(batch_size)],
-                [fake_label for _ in xrange(batch_size)],
+                [fake_image for _ in range(batch_size)],
+                [fake_label for _ in range(batch_size)],
             )
         start = self._index_in_epoch
         # Shuffle for the first epoch
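`xrange` no longer exists in Python 3, and `range` is already lazy, so this is a drop-in replacement. A self-contained sketch of the fake-batch construction, with hypothetical placeholder values:

    batch_size = 4
    fake_image = [1.0] * 784  # hypothetical flattened 28x28 image
    fake_label = 0

    images, labels = (
        [fake_image for _ in range(batch_size)],  # range, not Py2 xrange
        [fake_label for _ in range(batch_size)],
    )
    print(len(images), len(labels))  # 4 4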
@@ -250,19 +247,19 @@ class _DataSet:
 def _maybe_download(filename, work_directory, source_url):
     """Download the data from source url, unless it's already here.
 
     Args:
         filename: string, name of the file in the directory.
         work_directory: string, path to working directory.
         source_url: url to download from if file doesn't exist.
 
     Returns:
         Path to resulting file.
     """
     if not gfile.Exists(work_directory):
         gfile.MakeDirs(work_directory)
     filepath = os.path.join(work_directory, filename)
     if not gfile.Exists(filepath):
-        urllib.request.urlretrieve(source_url, filepath)
+        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
     with gfile.GFile(filepath) as f:
         size = f.size()
     print("Successfully downloaded", filename, size, "bytes.")
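Ruff's S310 flags `urlretrieve` because a caller-supplied URL could use an unexpected scheme such as `file://`; here the URLs are the fixed MNIST mirrors, so the commit suppresses the warning inline. An alternative sketch that validates the scheme instead of suppressing the check (the helper name is illustrative, not from this file):

    import urllib.parse
    import urllib.request

    def _checked_urlretrieve(source_url: str, filepath: str) -> None:
        """Download source_url to filepath, allowing only http(s) schemes."""
        scheme = urllib.parse.urlparse(source_url).scheme
        if scheme not in ("http", "https"):
            raise ValueError(f"refusing to fetch scheme {scheme!r}")
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310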
@@ -328,7 +325,8 @@ def read_data_sets(
 
     if not 0 <= validation_size <= len(train_images):
         raise ValueError(
-            f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}."
+            f"Validation size should be between 0 and {len(train_images)}. "
+            f"Received: {validation_size}."
         )
 
     validation_images = train_images[:validation_size]
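The split works because adjacent string literals are concatenated at compile time, f-strings included; it satisfies the line-length limit (ruff E501) without changing the message. A quick demonstration with made-up numbers:

    n, validation_size = 60000, 70000
    message = (
        f"Validation size should be between 0 and {n}. "
        f"Received: {validation_size}."
    )
    print(message)  # one continuous sentence, built from two literals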
@@ -336,7 +334,7 @@ def read_data_sets(
     train_images = train_images[validation_size:]
     train_labels = train_labels[validation_size:]
 
-    options = dict(dtype=dtype, reshape=reshape, seed=seed)
+    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
 
     train = _DataSet(train_images, train_labels, **options)
     validation = _DataSet(validation_images, validation_labels, **options)
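Replacing the `dict(...)` call with a literal is ruff's C408 fix: the literal avoids a name lookup and a function call, and the two forms build the same mapping. A tiny check:

    dtype, reshape, seed = "float32", True, None
    assert dict(dtype=dtype, reshape=reshape, seed=seed) == {
        "dtype": dtype,
        "reshape": reshape,
        "seed": seed,
    }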
diff --git a/requirements.txt b/requirements.txt
@@ -15,7 +15,7 @@ scikit-fuzzy
 scikit-learn
 statsmodels
 sympy
-tensorflow; python_version < "3.11"
+tensorflow
 texttable
 tweepy
 xgboost
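`python_version < "3.11"` is a PEP 508 environment marker that made pip skip tensorflow on 3.11 and newer; removing it installs tensorflow unconditionally now that TensorFlow supports the current Python. The equivalent runtime check compares `sys.version_info` tuples (a naive string comparison would mis-order "3.9" and "3.11"):

    import sys

    # What the old marker `python_version < "3.11"` expressed:
    marker_applied = sys.version_info < (3, 11)
    print("tensorflow would have been installed here:", marker_applied)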