Reenable files when TensorFlow supports the current Python (#11318)

* Remove python_version < '3.12' for tensorflow

* Reenable dynamic_programming/k_means_clustering_tensorflow.py

* updating DIRECTORY.md

* Try to fix ruff

* Try to fix ruff

* Try to fix ruff

* Try to fix ruff

* Try to fix ruff

* Reenable machine_learning/lstm/lstm_prediction.py

* updating DIRECTORY.md

* Try to fix ruff

* Reenable computer_vision/cnn_classification.py

* updating DIRECTORY.md

* Reenable neural_network/input_data.py

* updating DIRECTORY.md

* Try to fix ruff

* Try to fix ruff

* Try to fix mypy

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* Try to fix ruff

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: MaximSmolskiy <MaximSmolskiy@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
This commit is contained in:
Maxim Smolskiy 2024-03-12 11:35:49 +03:00 committed by GitHub
parent c6ca1942e1
commit fd27953d44
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 27 additions and 19 deletions

View File

@ -134,6 +134,7 @@
* [Run Length Encoding](compression/run_length_encoding.py)

## Computer Vision
+* [Cnn Classification](computer_vision/cnn_classification.py)
* [Flip Augmentation](computer_vision/flip_augmentation.py)
* [Haralick Descriptors](computer_vision/haralick_descriptors.py)
* [Harris Corner](computer_vision/harris_corner.py)
@ -344,6 +345,7 @@
* [Floyd Warshall](dynamic_programming/floyd_warshall.py)
* [Integer Partition](dynamic_programming/integer_partition.py)
* [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py)
+* [K Means Clustering Tensorflow](dynamic_programming/k_means_clustering_tensorflow.py)
* [Knapsack](dynamic_programming/knapsack.py)
* [Largest Divisible Subset](dynamic_programming/largest_divisible_subset.py)
* [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py)
@ -571,6 +573,8 @@
* [Local Weighted Learning](machine_learning/local_weighted_learning/local_weighted_learning.py)
* [Logistic Regression](machine_learning/logistic_regression.py)
* [Loss Functions](machine_learning/loss_functions.py)
+* Lstm
+  * [Lstm Prediction](machine_learning/lstm/lstm_prediction.py)
* [Mfcc](machine_learning/mfcc.py)
* [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py)
* [Polynomial Regression](machine_learning/polynomial_regression.py)
@ -801,6 +805,7 @@
* [Swish](neural_network/activation_functions/swish.py)
* [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py)
* [Convolution Neural Network](neural_network/convolution_neural_network.py)
+* [Input Data](neural_network/input_data.py)
* [Simple Neural Network](neural_network/simple_neural_network.py)

## Other

View File

@ -76,11 +76,9 @@ def encrypt_and_write_to_file(
    key_size, n, e = read_key_file(key_filename)
    if key_size < block_size * 8:
        sys.exit(
-           "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher "
-           "requires the block size to be equal to or greater than the key size. "
-           "Either decrease the block size or use different keys.".format(
-               block_size * 8, key_size
-           )
+           f"ERROR: Block size is {block_size * 8} bits and key size is {key_size} "
+           "bits. The RSA cipher requires the block size to be equal to or greater "
+           "than the key size. Either decrease the block size or use different keys."
        )

    encrypted_blocks = [str(i) for i in encrypt_message(message, (n, e), block_size)]
@ -102,11 +100,10 @@ def read_from_file_and_decrypt(message_filename: str, key_filename: str) -> str:
    if key_size < block_size * 8:
        sys.exit(
-           "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher "
-           "requires the block size to be equal to or greater than the key size. "
-           "Did you specify the correct key file and encrypted file?".format(
-               block_size * 8, key_size
-           )
+           f"ERROR: Block size is {block_size * 8} bits and key size is {key_size} "
+           "bits. The RSA cipher requires the block size to be equal to or greater "
+           "than the key size. Did you specify the correct key file and encrypted "
+           "file?"
        )

    encrypted_blocks = []

View File

@ -17,11 +17,11 @@ if __name__ == "__main__":
    make sure you set the price column on line number 21. Here we
    use a dataset which have the price on 3rd column.
    """
-   df = pd.read_csv("sample_data.csv", header=None)
-   len_data = df.shape[:1][0]
+   sample_data = pd.read_csv("sample_data.csv", header=None)
+   len_data = sample_data.shape[:1][0]
    # If you're using some other dataset input the target column
-   actual_data = df.iloc[:, 1:2]
-   actual_data = actual_data.values.reshape(len_data, 1)
+   actual_data = sample_data.iloc[:, 1:2]
+   actual_data = actual_data.to_numpy().reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5

View File

@ -18,9 +18,9 @@ This module and all its submodules are deprecated.
"""

-import collections
import gzip
import os
+import typing
import urllib

import numpy
@ -28,7 +28,12 @@ from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated

-_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
+class _Datasets(typing.NamedTuple):
+    train: "_DataSet"
+    validation: "_DataSet"
+    test: "_DataSet"

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"

View File

@ -24,8 +24,9 @@ class DoubleLinkedListNode(Generic[T, U]):
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
-       return "Node: key: {}, val: {}, freq: {}, has next: {}, has prev: {}".format(
-           self.key, self.val, self.freq, self.next is not None, self.prev is not None
-       )
+       return (
+           f"Node: key: {self.key}, val: {self.val}, freq: {self.freq}, "
+           f"has next: {self.next is not None}, has prev: {self.prev is not None}"
+       )

View File

@ -17,7 +17,7 @@ rich
scikit-learn
statsmodels
sympy
-tensorflow ; python_version < '3.12'
+tensorflow
tweepy
# yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed
typing_extensions