From 81661bd2d0c34363de7d3e1e802fe2f75b9a1fa4 Mon Sep 17 00:00:00 2001 From: Ayush Yadav <115359450+ayush-yadavv@users.noreply.github.com> Date: Wed, 4 Oct 2023 05:17:26 +0530 Subject: [PATCH 001/306] Update newtons_law_of_gravitation.py : Typo(Space Removed) (#9351) --- physics/newtons_law_of_gravitation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/physics/newtons_law_of_gravitation.py b/physics/newtons_law_of_gravitation.py index 4bbeddd61..ae9da2f1e 100644 --- a/physics/newtons_law_of_gravitation.py +++ b/physics/newtons_law_of_gravitation.py @@ -3,7 +3,7 @@ Title : Finding the value of either Gravitational Force, one of the masses or di provided that the other three parameters are given. Description : Newton's Law of Universal Gravitation explains the presence of force of -attraction between bodies having a definite mass situated at a distance. It is usually +attraction between bodies having a definite mass situated at a distance. It is usually stated as that, every particle attracts every other particle in the universe with a force that is directly proportional to the product of their masses and inversely proportional to the square of the distance between their centers. 
The publication of the From 12431389e32c290aae8c046ce9d8504d698d5f41 Mon Sep 17 00:00:00 2001 From: "Tan Kai Qun, Jeremy" Date: Wed, 4 Oct 2023 10:47:03 +0900 Subject: [PATCH 002/306] Add typing to topological_sort.py (#9650) * Add typing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Jeremy Tan Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- sorts/topological_sort.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/sorts/topological_sort.py b/sorts/topological_sort.py index 59a0c8571..efce8165f 100644 --- a/sorts/topological_sort.py +++ b/sorts/topological_sort.py @@ -5,11 +5,17 @@ # b c # / \ # d e -edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []} -vertices = ["a", "b", "c", "d", "e"] +edges: dict[str, list[str]] = { + "a": ["c", "b"], + "b": ["d", "e"], + "c": [], + "d": [], + "e": [], +} +vertices: list[str] = ["a", "b", "c", "d", "e"] -def topological_sort(start, visited, sort): +def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]: """Perform topological sort on a directed acyclic graph.""" current = start # add current to visited From 28f1e68f005f99eb628efd1af899bdfe1c1bc99e Mon Sep 17 00:00:00 2001 From: "Tan Kai Qun, Jeremy" Date: Wed, 4 Oct 2023 11:05:47 +0900 Subject: [PATCH 003/306] Add typing (#9651) Co-authored-by: Jeremy Tan --- sorts/stooge_sort.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sorts/stooge_sort.py b/sorts/stooge_sort.py index 9a5bedeae..767c6a059 100644 --- a/sorts/stooge_sort.py +++ b/sorts/stooge_sort.py @@ -1,4 +1,4 @@ -def stooge_sort(arr): +def stooge_sort(arr: list[int]) -> list[int]: """ Examples: >>> stooge_sort([18.1, 0, -7.1, -1, 2, 2]) @@ -11,7 +11,7 @@ def stooge_sort(arr): return arr -def stooge(arr, i, h): +def stooge(arr: list[int], i: int, h: int) -> None: if i >= h: return From 
a7133eca13d312fa729e2872048c7d9a662f6c8c Mon Sep 17 00:00:00 2001 From: "Tan Kai Qun, Jeremy" Date: Wed, 4 Oct 2023 11:06:52 +0900 Subject: [PATCH 004/306] Add typing (#9652) Co-authored-by: Jeremy Tan --- sorts/shell_sort.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sorts/shell_sort.py b/sorts/shell_sort.py index 10ae9ba40..b65609c97 100644 --- a/sorts/shell_sort.py +++ b/sorts/shell_sort.py @@ -3,7 +3,7 @@ https://en.wikipedia.org/wiki/Shellsort#Pseudocode """ -def shell_sort(collection): +def shell_sort(collection: list[int]) -> list[int]: """Pure implementation of shell sort algorithm in Python :param collection: Some mutable ordered collection with heterogeneous comparable items inside From 8c23cc5117b338ea907045260274ac40301a4e0e Mon Sep 17 00:00:00 2001 From: "Tan Kai Qun, Jeremy" Date: Wed, 4 Oct 2023 11:07:25 +0900 Subject: [PATCH 005/306] Add typing (#9654) Co-authored-by: Jeremy Tan --- sorts/selection_sort.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sorts/selection_sort.py b/sorts/selection_sort.py index f3beb31b7..28971a5e1 100644 --- a/sorts/selection_sort.py +++ b/sorts/selection_sort.py @@ -11,7 +11,7 @@ python selection_sort.py """ -def selection_sort(collection): +def selection_sort(collection: list[int]) -> list[int]: """Pure implementation of the selection sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous comparable items inside From 700df39ad446da895d413c0383632871459f0e9f Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: Wed, 4 Oct 2023 09:04:55 +0530 Subject: [PATCH 006/306] Fixed file name in transposition_cipher_encrypt_decrypt_file.py. Fixing bug file not found. 
(#9426) * Fixed file name in trnasposition_cipher_encrypt_decrypt_file.py * Removed Output.txt * Removed Output.txt * Fixed build errors --- ciphers/prehistoric_men.txt | 1196 ++++++++--------- ...ansposition_cipher_encrypt_decrypt_file.py | 4 +- 2 files changed, 600 insertions(+), 600 deletions(-) diff --git a/ciphers/prehistoric_men.txt b/ciphers/prehistoric_men.txt index a58e533a8..8d1b2bd8c 100644 --- a/ciphers/prehistoric_men.txt +++ b/ciphers/prehistoric_men.txt @@ -40,8 +40,8 @@ Transcriber's note: version referred to above. One example of this might occur in the second paragraph under "Choppers and Adze-like Tools", page 46, which contains the phrase - an adze cutting edge is ? shaped. The symbol before - shaped looks like a sharply-italicized sans-serif L. + �an adze cutting edge is ? shaped�. The symbol before + �shaped� looks like a sharply-italicized sans-serif �L�. Devices that cannot display that symbol may substitute a question mark, a square, or other symbol. @@ -98,7 +98,7 @@ forced or pedantic; at least I have done my very best to tell the story simply and clearly. Many friends have aided in the preparation of the book. The whimsical -charm of Miss Susan Richerts illustrations add enormously to the +charm of Miss Susan Richert�s illustrations add enormously to the spirit I wanted. She gave freely of her own time on the drawings and in planning the book with me. My colleagues at the University of Chicago, especially Professor Wilton M. Krogman (now of the University @@ -108,7 +108,7 @@ the Department of Anthropology, gave me counsel in matters bearing on their special fields, and the Department of Anthropology bore some of the expense of the illustrations. From Mrs. Irma Hunter and Mr. Arnold Maremont, who are not archeologists at all and have only an intelligent -laymans notion of archeology, I had sound advice on how best to tell +layman�s notion of archeology, I had sound advice on how best to tell the story. 
I am deeply indebted to all these friends. While I was preparing the second edition, I had the great fortune @@ -117,13 +117,13 @@ Washburn, now of the Department of Anthropology of the University of California, and the fourth, fifth, and sixth chapters with Professor Hallum L. Movius, Jr., of the Peabody Museum, Harvard University. The book has gained greatly in accuracy thereby. In matters of dating, -Professor Movius and the indications of Professor W. F. Libbys Carbon +Professor Movius and the indications of Professor W. F. Libby�s Carbon 14 chronology project have both encouraged me to choose the lowest dates now current for the events of the Pleistocene Ice Age. There is still no certain way of fixing a direct chronology for most of the -Pleistocene, but Professor Libbys method appears very promising for +Pleistocene, but Professor Libby�s method appears very promising for its end range and for proto-historic dates. In any case, this book -names periods, and new dates may be written in against mine, if new +names �periods,� and new dates may be written in against mine, if new and better dating systems appear. I wish to thank Dr. Clifford C. Gregg, Director of Chicago Natural @@ -150,7 +150,7 @@ Clark Howell of the Department of Anthropology of the University of Chicago in reworking the earlier chapters, and he was very patient in the matter, which I sincerely appreciate. -All of Mrs. Susan Richert Allens original drawings appear, but a few +All of Mrs. Susan Richert Allen�s original drawings appear, but a few necessary corrections have been made in some of the charts and some new drawings have been added by Mr. John Pfiffner, Staff Artist, Chicago Natural History Museum. @@ -200,7 +200,7 @@ HOW WE LEARN about Prehistoric Men Prehistory means the time before written history began. Actually, more -than 99 per cent of mans story is prehistory. Man is at least half a +than 99 per cent of man�s story is prehistory. 
Man is at least half a million years old, but he did not begin to write history (or to write anything) until about 5,000 years ago. @@ -216,7 +216,7 @@ The scientists who study the bones and teeth and any other parts they find of the bodies of prehistoric men, are called _physical anthropologists_. Physical anthropologists are trained, much like doctors, to know all about the human body. They study living people, -too; they know more about the biological facts of human races than +too; they know more about the biological facts of human �races� than anybody else. If the police find a badly decayed body in a trunk, they ask a physical anthropologist to tell them what the person originally looked like. The physical anthropologists who specialize in @@ -228,14 +228,14 @@ ARCHEOLOGISTS There is a kind of scientist who studies the things that prehistoric men made and did. Such a scientist is called an _archeologist_. It is -the archeologists business to look for the stone and metal tools, the +the archeologist�s business to look for the stone and metal tools, the pottery, the graves, and the caves or huts of the men who lived before history began. But there is more to archeology than just looking for things. In -Professor V. Gordon Childes words, archeology furnishes a sort of +Professor V. Gordon Childe�s words, archeology �furnishes a sort of history of human activity, provided always that the actions have -produced concrete results and left recognizable material traces. You +produced concrete results and left recognizable material traces.� You will see that there are at least three points in what Childe says: 1. The archeologists have to find the traces of things left behind by @@ -245,7 +245,7 @@ will see that there are at least three points in what Childe says: too soft or too breakable to last through the years. However, 3. 
The archeologist must use whatever he can find to tell a story--to - make a sort of history--from the objects and living-places and + make a �sort of history�--from the objects and living-places and graves that have escaped destruction. What I mean is this: Let us say you are walking through a dump yard, @@ -253,8 +253,8 @@ and you find a rusty old spark plug. If you want to think about what the spark plug means, you quickly remember that it is a part of an automobile motor. This tells you something about the man who threw the spark plug on the dump. He either had an automobile, or he knew -or lived near someone who did. He cant have lived so very long ago, -youll remember, because spark plugs and automobiles are only about +or lived near someone who did. He can�t have lived so very long ago, +you�ll remember, because spark plugs and automobiles are only about sixty years old. When you think about the old spark plug in this way you have @@ -264,8 +264,8 @@ It is the same way with the man-made things we archeologists find and put in museums. Usually, only a few of these objects are pretty to look at; but each of them has some sort of story to tell. Making the interpretation of his finds is the most important part of the -archeologists job. It is the way he gets at the sort of history of -human activity which is expected of archeology. +archeologist�s job. It is the way he gets at the �sort of history of +human activity� which is expected of archeology. SOME OTHER SCIENTISTS @@ -274,7 +274,7 @@ There are many other scientists who help the archeologist and the physical anthropologist find out about prehistoric men. The geologists help us tell the age of the rocks or caves or gravel beds in which human bones or man-made objects are found. There are other scientists -with names which all begin with paleo (the Greek word for old). The +with names which all begin with �paleo� (the Greek word for �old�). The _paleontologists_ study fossil animals. 
There are also, for example, such scientists as _paleobotanists_ and _paleoclimatologists_, who study ancient plants and climates. These scientists help us to know @@ -306,20 +306,20 @@ systems. The rate of disappearance of radioactivity as time passes.[1]] [1] It is important that the limitations of the radioactive carbon - dating system be held in mind. As the statistics involved in + �dating� system be held in mind. As the statistics involved in the system are used, there are two chances in three that the - date of the sample falls within the range given as plus or - minus an added number of years. For example, the date for the - Jarmo village (see chart), given as 6750 200 B.C., really + �date� of the sample falls within the range given as plus or + minus an added number of years. For example, the �date� for the + Jarmo village (see chart), given as 6750 � 200 B.C., really means that there are only two chances in three that the real date of the charcoal sampled fell between 6950 and 6550 B.C. We have also begun to suspect that there are ways in which the - samples themselves may have become contaminated, either on + samples themselves may have become �contaminated,� either on the early or on the late side. We now tend to be suspicious of single radioactive carbon determinations, or of determinations from one site alone. But as a fabric of consistent determinations for several or more sites of one archeological - period, we gain confidence in the dates. + period, we gain confidence in the dates. HOW THE SCIENTISTS FIND OUT @@ -330,9 +330,9 @@ about prehistoric men. We also need a word about _how_ they find out. All our finds came by accident until about a hundred years ago. Men digging wells, or digging in caves for fertilizer, often turned up ancient swords or pots or stone arrowheads. People also found some odd -pieces of stone that didnt look like natural forms, but they also -didnt look like any known tool. 
As a result, the people who found them -gave them queer names; for example, thunderbolts. The people thought +pieces of stone that didn�t look like natural forms, but they also +didn�t look like any known tool. As a result, the people who found them +gave them queer names; for example, �thunderbolts.� The people thought the strange stones came to earth as bolts of lightning. We know now that these strange stones were prehistoric stone tools. @@ -349,7 +349,7 @@ story of cave men on Mount Carmel, in Palestine, began to be known. Planned archeological digging is only about a century old. Even before this, however, a few men realized the significance of objects they dug from the ground; one of these early archeologists was our own Thomas -Jefferson. The first real mound-digger was a German grocers clerk, +Jefferson. The first real mound-digger was a German grocer�s clerk, Heinrich Schliemann. Schliemann made a fortune as a merchant, first in Europe and then in the California gold-rush of 1849. He became an American citizen. Then he retired and had both money and time to test @@ -389,16 +389,16 @@ used had been a soft, unbaked mud-brick, and most of the debris consisted of fallen or rain-melted mud from these mud-bricks. This idea of _stratification_, like the cake layers, was already a -familiar one to the geologists by Schliemanns time. They could show +familiar one to the geologists by Schliemann�s time. They could show that their lowest layer of rock was oldest or earliest, and that the -overlying layers became more recent as one moved upward. Schliemanns +overlying layers became more recent as one moved upward. Schliemann�s digging proved the same thing at Troy. His first (lowest and earliest) city had at least nine layers above it; he thought that the second -layer contained the remains of Homers Troy. We now know that Homeric +layer contained the remains of Homer�s Troy. 
We now know that Homeric Troy was layer VIIa from the bottom; also, we count eleven layers or sub-layers in total. -Schliemanns work marks the beginnings of modern archeology. Scholars +Schliemann�s work marks the beginnings of modern archeology. Scholars soon set out to dig on ancient sites, from Egypt to Central America. @@ -410,21 +410,21 @@ Archeologists began to get ideas as to the kinds of objects that belonged together. If you compared a mail-order catalogue of 1890 with one of today, you would see a lot of differences. If you really studied the two catalogues hard, you would also begin to see that certain -objects go together. Horseshoes and metal buggy tires and pieces of +objects �go together.� Horseshoes and metal buggy tires and pieces of harness would begin to fit into a picture with certain kinds of coal stoves and furniture and china dishes and kerosene lamps. Our friend the spark plug, and radios and electric refrigerators and light bulbs would fit into a picture with different kinds of furniture and dishes -and tools. You wont be old enough to remember the kind of hats that -women wore in 1890, but youve probably seen pictures of them, and you -know very well they couldnt be worn with the fashions of today. +and tools. You won�t be old enough to remember the kind of hats that +women wore in 1890, but you�ve probably seen pictures of them, and you +know very well they couldn�t be worn with the fashions of today. This is one of the ways that archeologists study their materials. The various tools and weapons and jewelry, the pottery, the kinds of houses, and even the ways of burying the dead tend to fit into pictures. Some archeologists call all of the things that go together to make such a picture an _assemblage_. The assemblage of the first layer -of Schliemanns Troy was as different from that of the seventh layer as +of Schliemann�s Troy was as different from that of the seventh layer as our 1900 mail-order catalogue is from the one of today. 
The archeologists who came after Schliemann began to notice other @@ -433,23 +433,23 @@ idea that people will buy better mousetraps goes back into very ancient times. Today, if we make good automobiles or radios, we can sell some of them in Turkey or even in Timbuktu. This means that a few present-day types of American automobiles and radios form part -of present-day assemblages in both Turkey and Timbuktu. The total -present-day assemblage of Turkey is quite different from that of +of present-day �assemblages� in both Turkey and Timbuktu. The total +present-day �assemblage� of Turkey is quite different from that of Timbuktu or that of America, but they have at least some automobiles and some radios in common. Now these automobiles and radios will eventually wear out. Let us suppose we could go to some remote part of Turkey or to Timbuktu in a -dream. We dont know what the date is, in our dream, but we see all +dream. We don�t know what the date is, in our dream, but we see all sorts of strange things and ways of living in both places. Nobody tells us what the date is. But suddenly we see a 1936 Ford; so we know that in our dream it has to be at least the year 1936, and only as many years after that as we could reasonably expect a Ford to keep -in running order. The Ford would probably break down in twenty years -time, so the Turkish or Timbuktu assemblage were seeing in our dream +in running order. The Ford would probably break down in twenty years� +time, so the Turkish or Timbuktu �assemblage� we�re seeing in our dream has to date at about A.D. 1936-56. -Archeologists not only date their ancient materials in this way; they +Archeologists not only �date� their ancient materials in this way; they also see over what distances and between which peoples trading was done. It turns out that there was a good deal of trading in ancient times, probably all on a barter and exchange basis. @@ -480,13 +480,13 @@ site. 
They find the remains of everything that would last through time, in several different layers. They know that the assemblage in the bottom layer was laid down earlier than the assemblage in the next layer above, and so on up to the topmost layer, which is the latest. -They look at the results of other digs and find that some other +They look at the results of other �digs� and find that some other archeologist 900 miles away has found ax-heads in his lowest layer, exactly like the ax-heads of their fifth layer. This means that their fifth layer must have been lived in at about the same time as was the first layer in the site 200 miles away. It also may mean that the people who lived in the two layers knew and traded with each other. Or -it could mean that they didnt necessarily know each other, but simply +it could mean that they didn�t necessarily know each other, but simply that both traded with a third group at about the same time. You can see that the more we dig and find, the more clearly the main @@ -501,8 +501,8 @@ those of domesticated animals, for instance, sheep or cattle, and therefore the people must have kept herds. More important than anything else--as our structure grows more -complicated and our materials increase--is the fact that a sort -of history of human activity does begin to appear. The habits or +complicated and our materials increase--is the fact that �a sort +of history of human activity� does begin to appear. The habits or traditions that men formed in the making of their tools and in the ways they did things, begin to stand out for us. How characteristic were these habits and traditions? What areas did they spread over? @@ -519,7 +519,7 @@ method--chemical tests of the bones--that will enable them to discover what the blood-type may have been. One thing is sure. We have never found a group of skeletons so absolutely similar among themselves--so cast from a single mould, so to speak--that we could claim to have a -pure race. 
I am sure we never shall. +�pure� race. I am sure we never shall. We become particularly interested in any signs of change--when new materials and tool types and ways of doing things replace old ones. We @@ -527,7 +527,7 @@ watch for signs of social change and progress in one way or another. We must do all this without one word of written history to aid us. Everything we are concerned with goes back to the time _before_ men -learned to write. That is the prehistorians job--to find out what +learned to write. That is the prehistorian�s job--to find out what happened before history began. @@ -538,9 +538,9 @@ THE CHANGING WORLD in which Prehistoric Men Lived [Illustration] -Mankind, well say, is at least a half million years old. It is very +Mankind, we�ll say, is at least a half million years old. It is very hard to understand how long a time half a million years really is. -If we were to compare this whole length of time to one day, wed get +If we were to compare this whole length of time to one day, we�d get something like this: The present time is midnight, and Jesus was born just five minutes and thirty-six seconds ago. Earliest history began less than fifteen minutes ago. Everything before 11:45 was in @@ -569,7 +569,7 @@ book; it would mainly affect the dates earlier than 25,000 years ago. CHANGES IN ENVIRONMENT -The earth probably hasnt changed much in the last 5,000 years (250 +The earth probably hasn�t changed much in the last 5,000 years (250 generations). Men have built things on its surface and dug into it and drawn boundaries on maps of it, but the places where rivers, lakes, seas, and mountains now stand have changed very little. @@ -605,7 +605,7 @@ the glaciers covered most of Canada and the northern United States and reached down to southern England and France in Europe. Smaller ice sheets sat like caps on the Rockies, the Alps, and the Himalayas. 
The continental glaciation only happened north of the equator, however, so -remember that Ice Age is only half true. +remember that �Ice Age� is only half true. As you know, the amount of water on and about the earth does not vary. These large glaciers contained millions of tons of water frozen into @@ -677,9 +677,9 @@ their dead. At about the time when the last great glacier was finally melting away, men in the Near East made the first basic change in human economy. They began to plant grain, and they learned to raise and herd certain -animals. This meant that they could store food in granaries and on the -hoof against the bad times of the year. This first really basic change -in mans way of living has been called the food-producing revolution. +animals. This meant that they could store food in granaries and �on the +hoof� against the bad times of the year. This first really basic change +in man�s way of living has been called the �food-producing revolution.� By the time it happened, a modern kind of climate was beginning. Men had already grown to look as they do now. Know-how in ways of living had developed and progressed, slowly but surely, up to a point. It was @@ -698,25 +698,25 @@ Prehistoric Men THEMSELVES DO WE KNOW WHERE MAN ORIGINATED? -For a long time some scientists thought the cradle of mankind was in +For a long time some scientists thought the �cradle of mankind� was in central Asia. Other scientists insisted it was in Africa, and still -others said it might have been in Europe. Actually, we dont know -where it was. We dont even know that there was only _one_ cradle. -If we had to choose a cradle at this moment, we would probably say +others said it might have been in Europe. Actually, we don�t know +where it was. We don�t even know that there was only _one_ �cradle.� +If we had to choose a �cradle� at this moment, we would probably say Africa. But the southern portions of Asia and Europe may also have been included in the general area. 
The scene of the early development of -mankind was certainly the Old World. It is pretty certain men didnt +mankind was certainly the Old World. It is pretty certain men didn�t reach North or South America until almost the end of the Ice Age--had they done so earlier we would certainly have found some trace of them by now. The earliest tools we have yet found come from central and south -Africa. By the dating system Im using, these tools must be over +Africa. By the dating system I�m using, these tools must be over 500,000 years old. There are now reports that a few such early tools have been found--at the Sterkfontein cave in South Africa--along with -the bones of small fossil men called australopithecines. +the bones of small fossil men called �australopithecines.� -Not all scientists would agree that the australopithecines were men, +Not all scientists would agree that the australopithecines were �men,� or would agree that the tools were made by the australopithecines themselves. For these sticklers, the earliest bones of men come from the island of Java. The date would be about 450,000 years ago. So far, @@ -727,12 +727,12 @@ Let me say it another way. How old are the earliest traces of men we now have? Over half a million years. This was a time when the first alpine glaciation was happening in the north. What has been found so far? The tools which the men of those times made, in different parts -of Africa. It is now fairly generally agreed that the men who made -the tools were the australopithecines. There is also a more man-like +of Africa. It is now fairly generally agreed that the �men� who made +the tools were the australopithecines. There is also a more �man-like� jawbone at Kanam in Kenya, but its find-spot has been questioned. The next earliest bones we have were found in Java, and they may be almost a hundred thousand years younger than the earliest African finds. We -havent yet found the tools of these early Javanese. 
Our knowledge of +haven�t yet found the tools of these early Javanese. Our knowledge of tool-using in Africa spreads quickly as time goes on: soon after the appearance of tools in the south we shall have them from as far north as Algeria. @@ -758,30 +758,30 @@ prove it. MEN AND APES Many people used to get extremely upset at the ill-formed notion -that man descended from the apes. Such words were much more likely -to start fights or monkey trials than the correct notion that all +that �man descended from the apes.� Such words were much more likely +to start fights or �monkey trials� than the correct notion that all living animals, including man, ascended or evolved from a single-celled organism which lived in the primeval seas hundreds of millions of years -ago. Men are mammals, of the order called Primates, and mans living -relatives are the great apes. Men didnt descend from the apes or +ago. Men are mammals, of the order called Primates, and man�s living +relatives are the great apes. Men didn�t �descend� from the apes or apes from men, and mankind must have had much closer relatives who have since become extinct. Men stand erect. They also walk and run on their two feet. Apes are happiest in trees, swinging with their arms from branch to branch. Few branches of trees will hold the mighty gorilla, although he still -manages to sleep in trees. Apes cant stand really erect in our sense, +manages to sleep in trees. Apes can�t stand really erect in our sense, and when they have to run on the ground, they use the knuckles of their hands as well as their feet. A key group of fossil bones here are the south African australopithecines. These are called the _Australopithecinae_ or -man-apes or sometimes even ape-men. We do not _know_ that they were +�man-apes� or sometimes even �ape-men.� We do not _know_ that they were directly ancestral to men but they can hardly have been so to apes. -Presently Ill describe them a bit more. 
The reason I mention them +Presently I�ll describe them a bit more. The reason I mention them here is that while they had brains no larger than those of apes, their hipbones were enough like ours so that they must have stood erect. -There is no good reason to think they couldnt have walked as we do. +There is no good reason to think they couldn�t have walked as we do. BRAINS, HANDS, AND TOOLS @@ -801,12 +801,12 @@ Nobody knows which of these three is most important, or which came first. Most probably the growth of all three things was very much blended together. If you think about each of the things, you will see what I mean. Unless your hand is more flexible than a paw, and your -thumb will work against (or oppose) your fingers, you cant hold a tool -very well. But you wouldnt get the idea of using a tool unless you had +thumb will work against (or oppose) your fingers, you can�t hold a tool +very well. But you wouldn�t get the idea of using a tool unless you had enough brain to help you see cause and effect. And it is rather hard to see how your hand and brain would develop unless they had something to -practice on--like using tools. In Professor Krogmans words, the hand -must become the obedient servant of the eye and the brain. It is the +practice on--like using tools. In Professor Krogman�s words, �the hand +must become the obedient servant of the eye and the brain.� It is the _co-ordination_ of these things that counts. Many other things must have been happening to the bodies of the @@ -820,17 +820,17 @@ little by little, all together. Men became men very slowly. WHEN SHALL WE CALL MEN MEN? -What do I mean when I say men? People who looked pretty much as we +What do I mean when I say �men�? People who looked pretty much as we do, and who used different tools to do different things, are men to me. -Well probably never know whether the earliest ones talked or not. They +We�ll probably never know whether the earliest ones talked or not. 
They probably had vocal cords, so they could make sounds, but did they know how to make sounds work as symbols to carry meanings? But if the fossil -bones look like our skeletons, and if we find tools which well agree -couldnt have been made by nature or by animals, then Id say we had +bones look like our skeletons, and if we find tools which we�ll agree +couldn�t have been made by nature or by animals, then I�d say we had traces of _men_. The australopithecine finds of the Transvaal and Bechuanaland, in -south Africa, are bound to come into the discussion here. Ive already +south Africa, are bound to come into the discussion here. I�ve already told you that the australopithecines could have stood upright and walked on their two hind legs. They come from the very base of the Pleistocene or Ice Age, and a few coarse stone tools have been found @@ -848,17 +848,17 @@ bones. The doubt as to whether the australopithecines used the tools themselves goes like this--just suppose some man-like creature (whose bones we have not yet found) made the tools and used them to kill and butcher australopithecines. Hence a few experts tend to let -australopithecines still hang in limbo as man-apes. +australopithecines still hang in limbo as �man-apes.� THE EARLIEST MEN WE KNOW -Ill postpone talking about the tools of early men until the next +I�ll postpone talking about the tools of early men until the next chapter. The men whose bones were the earliest of the Java lot have been given the name _Meganthropus_. The bones are very fragmentary. We would not understand them very well unless we had the somewhat later -Javanese lot--the more commonly known _Pithecanthropus_ or Java -man--against which to refer them for study. One of the less well-known +Javanese lot--the more commonly known _Pithecanthropus_ or �Java +man�--against which to refer them for study. 
One of the less well-known and earliest fragments, a piece of lower jaw and some teeth, rather strongly resembles the lower jaws and teeth of the australopithecine type. Was _Meganthropus_ a sort of half-way point between the @@ -872,7 +872,7 @@ finds of Java man were made in 1891-92 by Dr. Eugene Dubois, a Dutch doctor in the colonial service. Finds have continued to be made. There are now bones enough to account for four skulls. There are also four jaws and some odd teeth and thigh bones. Java man, generally speaking, -was about five feet six inches tall, and didnt hold his head very +was about five feet six inches tall, and didn�t hold his head very erect. His skull was very thick and heavy and had room for little more than two-thirds as large a brain as we have. He had big teeth and a big jaw and enormous eyebrow ridges. @@ -885,22 +885,22 @@ belonged to his near descendants. Remember that there are several varieties of men in the whole early Java lot, at least two of which are earlier than the _Pithecanthropus_, -Java man. Some of the earlier ones seem to have gone in for +�Java man.� Some of the earlier ones seem to have gone in for bigness, in tooth-size at least. _Meganthropus_ is one of these earlier varieties. As we said, he _may_ turn out to be a link to the australopithecines, who _may_ or _may not_ be ancestral to men. _Meganthropus_ is best understandable in terms of _Pithecanthropus_, who appeared later in the same general area. _Pithecanthropus_ is pretty well understandable from the bones he left us, and also because -of his strong resemblance to the fully tool-using cave-dwelling Peking -man, _Sinanthropus_, about whom we shall talk next. But you can see +of his strong resemblance to the fully tool-using cave-dwelling �Peking +man,� _Sinanthropus_, about whom we shall talk next. But you can see that the physical anthropologists and prehistoric archeologists still have a lot of work to do on the problem of earliest men. 
PEKING MEN AND SOME EARLY WESTERNERS -The earliest known Chinese are called _Sinanthropus_, or Peking man, +The earliest known Chinese are called _Sinanthropus_, or �Peking man,� because the finds were made near that city. In World War II, the United States Marine guard at our Embassy in Peking tried to help get the bones out of the city before the Japanese attack. Nobody knows where @@ -913,9 +913,9 @@ casts of the bones. Peking man lived in a cave in a limestone hill, made tools, cracked animal bones to get the marrow out, and used fire. Incidentally, the bones of Peking man were found because Chinese dig for what they call -dragon bones and dragon teeth. Uneducated Chinese buy these things +�dragon bones� and �dragon teeth.� Uneducated Chinese buy these things in their drug stores and grind them into powder for medicine. The -dragon teeth and bones are really fossils of ancient animals, and +�dragon teeth� and �bones� are really fossils of ancient animals, and sometimes of men. The people who supply the drug stores have learned where to dig for strange bones and teeth. Paleontologists who get to China go to the drug stores to buy fossils. In a roundabout way, this @@ -924,7 +924,7 @@ is how the fallen-in cave of Peking man at Choukoutien was discovered. Peking man was not quite as tall as Java man but he probably stood straighter. His skull looked very much like that of the Java skull except that it had room for a slightly larger brain. His face was less -brutish than was Java mans face, but this isnt saying much. +brutish than was Java man�s face, but this isn�t saying much. Peking man dates from early in the interglacial period following the second alpine glaciation. He probably lived close to 350,000 years @@ -946,9 +946,9 @@ big ridges over the eyes. The more fragmentary skull from Swanscombe in England (p. 11) has been much more carefully studied. Only the top and back of that skull have been found. 
Since the skull rounds up
nicely, it has been assumed that the face and forehead must have been quite
-modern. Careful comparison with Steinheim shows that this was not
+“modern.” Careful comparison with Steinheim shows that this was not
necessarily so. This is important because it bears on the question of
-how early truly modern man appeared.
+how early truly “modern” man appeared.

Recently two fragmentary jaws were found at Ternafine in Algeria,
northwest Africa. They look like the jaws of Peking man. Tools were
@@ -971,22 +971,22 @@ modern Australian natives. During parts of the Ice Age
there was a land bridge all the way from Java to Australia.


-TWO ENGLISHMEN WHO WERENT OLD
+TWO ENGLISHMEN WHO WEREN’T OLD

The older textbooks contain descriptions of two English finds which
were thought to be very old. These were called Piltdown (_Eoanthropus
dawsoni_) and Galley Hill. The skulls were very modern in appearance.
In 1948-49, British scientists began making chemical tests which proved
that neither of these finds is very old. It is now known that both
-Piltdown man and the tools which were said to have been found with
+“Piltdown man” and the tools which were said to have been found with
him were part of an elaborate fake!


-TYPICAL CAVE MEN
+TYPICAL “CAVE MEN”

The next men we have to talk about are all members of a related group.
-These are the Neanderthal group. Neanderthal man himself was found in
-the Neander Valley, near Dsseldorf, Germany, in 1856. He was the first
+These are the Neanderthal group. “Neanderthal man” himself was found in
+the Neander Valley, near Düsseldorf, Germany, in 1856. He was the first
human fossil to be recognized as such.

[Illustration: PRINCIPAL KNOWN TYPES OF FOSSIL MEN
PITHECANTHROPUS] Some of us think that the neanderthaloids proper are only those people -of western Europe who didnt get out before the beginning of the last +of western Europe who didn�t get out before the beginning of the last great glaciation, and who found themselves hemmed in by the glaciers in the Alps and northern Europe. Being hemmed in, they intermarried a bit too much and developed into a special type. Professor F. Clark @@ -1010,7 +1010,7 @@ pre-neanderthaloids. There are traces of these pre-neanderthaloids pretty much throughout Europe during the third interglacial period--say 100,000 years ago. The pre-neanderthaloids are represented by such finds as the ones at Ehringsdorf in Germany and Saccopastore in Italy. -I wont describe them for you, since they are simply less extreme than +I won�t describe them for you, since they are simply less extreme than the neanderthaloids proper--about half way between Steinheim and the classic Neanderthal people. @@ -1019,24 +1019,24 @@ get caught in the pocket of the southwest corner of Europe at the onset of the last great glaciation became the classic Neanderthalers. Out in the Near East, Howell thinks, it is possible to see traces of people evolving from the pre-neanderthaloid type toward that of fully modern -man. Certainly, we dont see such extreme cases of neanderthaloidism +man. Certainly, we don�t see such extreme cases of �neanderthaloidism� outside of western Europe. There are at least a dozen good examples in the main or classic Neanderthal group in Europe. They date to just before and in the earlier part of the last great glaciation (85,000 to 40,000 years ago). -Many of the finds have been made in caves. The cave men the movies +Many of the finds have been made in caves. The �cave men� the movies and the cartoonists show you are probably meant to be Neanderthalers. 
-Im not at all sure they dragged their women by the hair; the women
+I’m not at all sure they dragged their women by the hair; the women
were probably pretty tough, too!

Neanderthal men had large bony heads, but plenty of room for brains.
Some had brain cases even larger than the average for modern man. Their
faces were heavy, and they had eyebrow ridges of bone, but the ridges
were not as big as those of Java man. Their foreheads were very low,
-and they didnt have much chin. They were about five feet three inches
-tall, but were heavy and barrel-chested. But the Neanderthalers didnt
-slouch as much as theyve been blamed for, either.
+and they didn’t have much chin. They were about five feet three inches
+tall, but were heavy and barrel-chested. But the Neanderthalers didn’t
+slouch as much as they’ve been blamed for, either.

One important thing about the Neanderthal group is that there is a
fair number of them to study. Just as important is the fact that we know
@@ -1059,10 +1059,10 @@ different-looking people.

EARLY MODERN MEN

-How early is modern man (_Homo sapiens_), the wise man? Some people
+How early is modern man (_Homo sapiens_), the “wise man”? Some people
have thought that he was very early, a few still think so. Piltdown
and Galley Hill, which were quite modern in anatomical appearance and
-_supposedly_ very early in date, were the best evidence for very
+_supposedly_ very early in date, were the best “evidence” for very
early modern men. Now that Piltdown has been liquidated and Galley Hill
is known to be very late, what is left of the idea?

@@ -1073,13 +1073,13 @@ the Ternafine jaws, you might come to the conclusion that the crown
of the Swanscombe head was that of a modern-like man. Two more skulls,
again without faces, are available from a French
-cave site, Fontchevade. They come from the time of the last great
+cave site, Fontéchevade. They come from the time of the last great
interglacial, as did the pre-neanderthaloids.
The crowns of the -Fontchevade skulls also look quite modern. There is a bit of the +Font�chevade skulls also look quite modern. There is a bit of the forehead preserved on one of these skulls and the brow-ridge is not heavy. Nevertheless, there is a suggestion that the bones belonged to an immature individual. In this case, his (or even more so, if _her_) -brow-ridges would have been weak anyway. The case for the Fontchevade +brow-ridges would have been weak anyway. The case for the Font�chevade fossils, as modern type men, is little stronger than that for Swanscombe, although Professor Vallois believes it a good case. @@ -1101,8 +1101,8 @@ of the onset of colder weather, when the last glaciation was beginning in the north--say 75,000 years ago. The 70 per cent modern group came from only one cave, Mugharet es-Skhul -(cave of the kids). The other group, from several caves, had bones of -men of the type weve been calling pre-neanderthaloid which we noted +(�cave of the kids�). The other group, from several caves, had bones of +men of the type we�ve been calling pre-neanderthaloid which we noted were widespread in Europe and beyond. The tools which came with each of these finds were generally similar, and McCown and Keith, and other scholars since their study, have tended to assume that both the Skhul @@ -1131,26 +1131,26 @@ important fossil men of later Europe are shown in the chart on page DIFFERENCES IN THE EARLY MODERNS The main early European moderns have been divided into two groups, the -Cro-Magnon group and the Combe Capelle-Brnn group. Cro-Magnon people +Cro-Magnon group and the Combe Capelle-Br�nn group. Cro-Magnon people were tall and big-boned, with large, long, and rugged heads. They must have been built like many present-day Scandinavians. The Combe -Capelle-Brnn people were shorter; they had narrow heads and faces, and -big eyebrow-ridges. Of course we dont find the skin or hair of these -people. 
But there is little doubt they were Caucasoids (Whites). +Capelle-Br�nn people were shorter; they had narrow heads and faces, and +big eyebrow-ridges. Of course we don�t find the skin or hair of these +people. But there is little doubt they were Caucasoids (�Whites�). Another important find came in the Italian Riviera, near Monte Carlo. Here, in a cave near Grimaldi, there was a grave containing a woman and a young boy, buried together. The two skeletons were first called -Negroid because some features of their bones were thought to resemble +�Negroid� because some features of their bones were thought to resemble certain features of modern African Negro bones. But more recently, Professor E. A. Hooton and other experts questioned the use of the word -Negroid in describing the Grimaldi skeletons. It is true that nothing +�Negroid� in describing the Grimaldi skeletons. It is true that nothing is known of the skin color, hair form, or any other fleshy feature of -the Grimaldi people, so that the word Negroid in its usual meaning is +the Grimaldi people, so that the word �Negroid� in its usual meaning is not proper here. It is also not clear whether the features of the bones -claimed to be Negroid are really so at all. +claimed to be �Negroid� are really so at all. -From a place called Wadjak, in Java, we have proto-Australoid skulls +From a place called Wadjak, in Java, we have �proto-Australoid� skulls which closely resemble those of modern Australian natives. Some of the skulls found in South Africa, especially the Boskop skull, look like those of modern Bushmen, but are much bigger. The ancestors of @@ -1159,12 +1159,12 @@ Desert. True African Negroes were forest people who apparently expanded out of the west central African area only in the last several thousand years. Although dark in skin color, neither the Australians nor the Bushmen are Negroes; neither the Wadjak nor the Boskop skulls are -Negroid. 
+�Negroid.� -As weve already mentioned, Professor Weidenreich believed that Peking +As we�ve already mentioned, Professor Weidenreich believed that Peking man was already on the way to becoming a Mongoloid. Anyway, the -Mongoloids would seem to have been present by the time of the Upper -Cave at Choukoutien, the _Sinanthropus_ find-spot. +Mongoloids would seem to have been present by the time of the �Upper +Cave� at Choukoutien, the _Sinanthropus_ find-spot. WHAT THE DIFFERENCES MEAN @@ -1175,14 +1175,14 @@ From area to area, men tended to look somewhat different, just as they do today. This is all quite natural. People _tended_ to mate near home; in the anthropological jargon, they made up geographically localized breeding populations. The simple continental division of -stocks--black = Africa, yellow = Asia, white = Europe--is too simple +�stocks�--black = Africa, yellow = Asia, white = Europe--is too simple a picture to fit the facts. People became accustomed to life in some -particular area within a continent (we might call it a natural area). +particular area within a continent (we might call it a �natural area�). As they went on living there, they evolved towards some particular physical variety. It would, of course, have been difficult to draw a clear boundary between two adjacent areas. There must always have been some mating across the boundaries in every case. One thing human -beings dont do, and never have done, is to mate for purity. It is +beings don�t do, and never have done, is to mate for �purity.� It is self-righteous nonsense when we try to kid ourselves into thinking that they do. @@ -1195,28 +1195,28 @@ and they must do the writing about races. I shall, however, give two modern definitions of race, and then make one comment. Dr. William G. 
Boyd, professor of Immunochemistry, School of
-    Medicine, Boston University: We may define a human race as a
+    Medicine, Boston University: “We may define a human race as a
    population which differs significantly from other human
    populations in regard to the frequency of one or more of the genes it
-    possesses.
+    possesses.”

    Professor Sherwood L. Washburn, professor of Physical Anthropology,
-    Department of Anthropology, the University of California: A race
+    Department of Anthropology, the University of California: “A ‘race’
    is a group of genetically similar populations, and races intergrade
-    because there are always intermediate populations.
+    because there are always intermediate populations.”

My comment is that the ideas involved here are all biological: they
concern groups, _not_ individuals. Boyd and Washburn may differ a bit
-on what they want to consider a population, but a population is a
+on what they want to consider a “population,” but a population is a
group nevertheless, and genetics is biology to the hilt. Now a lot of
people still think of race in terms of how people dress or fix their
food or of other habits or customs they have. The next step is to talk
-about racial purity. None of this has anything whatever to do with
+about racial “purity.” None of this has anything whatever to do with
race proper, which is a matter of the biology of groups.

-Incidentally, Im told that if man very carefully _controls_
+Incidentally, I’m told that if man very carefully _controls_
the breeding of certain animals over generations--dogs, cattle,
-chickens--he might achieve a pure race of animals. But he doesnt do
+chickens--he might achieve a “pure” race of animals. But he doesn’t do
it. Some unfortunate genetic trait soon turns up, so this has just as
carefully to be bred out again, and so on.

@@ -1240,20 +1240,20 @@ date to the second great interglacial period, about 350,000 years ago.
Piltdown and Galley Hill are out, and with them, much of the starch in the old idea that there were two distinct lines of development -in human evolution: (1) a line of paleoanthropic development from +in human evolution: (1) a line of �paleoanthropic� development from Heidelberg to the Neanderthalers where it became extinct, and (2) a -very early modern line, through Piltdown, Galley Hill, Swanscombe, to +very early �modern� line, through Piltdown, Galley Hill, Swanscombe, to us. Swanscombe, Steinheim, and Ternafine are just as easily cases of very early pre-neanderthaloids. The pre-neanderthaloids were very widespread during the third interglacial: Ehringsdorf, Saccopastore, some of the Mount Carmel -people, and probably Fontchevade are cases in point. A variety of +people, and probably Font�chevade are cases in point. A variety of their descendants can be seen, from Java (Solo), Africa (Rhodesian man), and about the Mediterranean and in western Europe. As the acute cold of the last glaciation set in, the western Europeans found themselves surrounded by water, ice, or bitter cold tundra. To vastly -over-simplify it, they bred in and became classic neanderthaloids. +over-simplify it, they �bred in� and became classic neanderthaloids. But on Mount Carmel, the Skhul cave-find with its 70 per cent modern features shows what could happen elsewhere at the same time. @@ -1263,12 +1263,12 @@ modern skeletons of men. The modern skeletons differ from place to place, just as different groups of men living in different places still look different. -What became of the Neanderthalers? Nobody can tell me for sure. Ive a -hunch they were simply bred out again when the cold weather was over. +What became of the Neanderthalers? Nobody can tell me for sure. I�ve a +hunch they were simply �bred out� again when the cold weather was over. Many Americans, as the years go by, are no longer ashamed to claim they -have Indian blood in their veins. 
Give us a few more generations +have �Indian blood in their veins.� Give us a few more generations and there will not be very many other Americans left to whom we can -brag about it. It certainly isnt inconceivable to me to imagine a +brag about it. It certainly isn�t inconceivable to me to imagine a little Cro-Magnon boy bragging to his friends about his tough, strong, Neanderthaler great-great-great-great-grandfather! @@ -1281,15 +1281,15 @@ Cultural BEGINNINGS Men, unlike the lower animals, are made up of much more than flesh and -blood and bones; for men have culture. +blood and bones; for men have �culture.� WHAT IS CULTURE? -Culture is a word with many meanings. The doctors speak of making a -culture of a certain kind of bacteria, and ants are said to have a -culture. Then there is the Emily Post kind of culture--you say a -person is cultured, or that he isnt, depending on such things as +�Culture� is a word with many meanings. The doctors speak of making a +�culture� of a certain kind of bacteria, and ants are said to have a +�culture.� Then there is the Emily Post kind of �culture�--you say a +person is �cultured,� or that he isn�t, depending on such things as whether or not he eats peas with his knife. The anthropologists use the word too, and argue heatedly over its finer @@ -1300,7 +1300,7 @@ men from another. In this sense, a CULTURE means the way the members of a group of people think and believe and live, the tools they make, and the way they do things. Professor Robert Redfield says a culture is an organized or formalized body of conventional understandings. -Conventional understandings means the whole set of rules, beliefs, +�Conventional understandings� means the whole set of rules, beliefs, and standards which a group of people lives by. These understandings show themselves in art, and in the other things a people may make and do. The understandings continue to last, through tradition, from one @@ -1325,12 +1325,12 @@ Egyptians. 
I mean their beliefs as to why grain grew, as well as their ability to make tools with which to reap the grain. I mean their beliefs about life after death. What I am thinking about as culture is a thing which lasted in time. If any one Egyptian, even the Pharaoh, -died, it didnt affect the Egyptian culture of that particular moment. +died, it didn�t affect the Egyptian culture of that particular moment. PREHISTORIC CULTURES -For that long period of mans history that is all prehistory, we have +For that long period of man�s history that is all prehistory, we have no written descriptions of cultures. We find only the tools men made, the places where they lived, the graves in which they buried their dead. Fortunately for us, these tools and living places and graves all @@ -1345,15 +1345,15 @@ of the classic European Neanderthal group of men, we have found few cave-dwelling places of very early prehistoric men. First, there is the fallen-in cave where Peking man was found, near Peking. Then there are two or three other _early_, but not _very early_, possibilities. The -finds at the base of the French cave of Fontchevade, those in one of +finds at the base of the French cave of Font�chevade, those in one of the Makapan caves in South Africa, and several open sites such as Dr. -L. S. B. Leakeys Olorgesailie in Kenya doubtless all lie earlier than +L. S. B. Leakey�s Olorgesailie in Kenya doubtless all lie earlier than the time of the main European Neanderthal group, but none are so early as the Peking finds. You can see that we know very little about the home life of earlier prehistoric men. We find different kinds of early stone tools, but we -cant even be really sure which tools may have been used together. +can�t even be really sure which tools may have been used together. WHY LITTLE HAS LASTED FROM EARLY TIMES @@ -1380,11 +1380,11 @@ there first! 
The front of this enormous sheet of ice moved down over the country, crushing and breaking and plowing up everything, like a gigantic bulldozer. You can see what happened to our camp site. -Everything the glacier couldnt break, it pushed along in front of it +Everything the glacier couldn�t break, it pushed along in front of it or plowed beneath it. Rocks were ground to gravel, and soil was caught into the ice, which afterwards melted and ran off as muddy water. Hard -tools of flint sometimes remained whole. Human bones werent so hard; -its a wonder _any_ of them lasted. Gushing streams of melt water +tools of flint sometimes remained whole. Human bones weren�t so hard; +it�s a wonder _any_ of them lasted. Gushing streams of melt water flushed out the debris from underneath the glacier, and water flowed off the surface and through great crevasses. The hard materials these waters carried were even more rolled and ground up. Finally, such @@ -1407,26 +1407,26 @@ all up, and so we cannot say which particular sets of tools belonged together in the first place. -EOLITHS +�EOLITHS� But what sort of tools do we find earliest? For almost a century, people have been picking up odd bits of flint and other stone in the oldest Ice Age gravels in England and France. It is now thought these -odd bits of stone werent actually worked by prehistoric men. The -stones were given a name, _eoliths_, or dawn stones. You can see them +odd bits of stone weren�t actually worked by prehistoric men. The +stones were given a name, _eoliths_, or �dawn stones.� You can see them in many museums; but you can be pretty sure that very few of them were actually fashioned by men. -It is impossible to pick out eoliths that seem to be made in any -one _tradition_. By tradition I mean a set of habits for making one -kind of tool for some particular job. No two eoliths look very much +It is impossible to pick out �eoliths� that seem to be made in any +one _tradition_. 
By �tradition� I mean a set of habits for making one +kind of tool for some particular job. No two �eoliths� look very much alike: tools made as part of some one tradition all look much alike. -Now its easy to suppose that the very earliest prehistoric men picked -up and used almost any sort of stone. This wouldnt be surprising; you -and I do it when we go camping. In other words, some of these eoliths +Now it�s easy to suppose that the very earliest prehistoric men picked +up and used almost any sort of stone. This wouldn�t be surprising; you +and I do it when we go camping. In other words, some of these �eoliths� may actually have been used by prehistoric men. They must have used anything that might be handy when they needed it. We could have figured -that out without the eoliths. +that out without the �eoliths.� THE ROAD TO STANDARDIZATION @@ -1434,7 +1434,7 @@ THE ROAD TO STANDARDIZATION Reasoning from what we know or can easily imagine, there should have been three major steps in the prehistory of tool-making. The first step would have been simple _utilization_ of what was at hand. This is the -step into which the eoliths would fall. The second step would have +step into which the �eoliths� would fall. The second step would have been _fashioning_--the haphazard preparation of a tool when there was a need for it. Probably many of the earlier pebble tools, which I shall describe next, fall into this group. The third step would have been @@ -1447,7 +1447,7 @@ tradition appears. PEBBLE TOOLS -At the beginning of the last chapter, youll remember that I said there +At the beginning of the last chapter, you�ll remember that I said there were tools from very early geological beds. The earliest bones of men have not yet been found in such early beds although the Sterkfontein australopithecine cave approaches this early date. The earliest tools @@ -1467,7 +1467,7 @@ Old World besides Africa; in fact, some prehistorians already claim to have identified a few. 
Since the forms and the distinct ways of making the earlier pebble tools had not yet sufficiently jelled into a set tradition, they are difficult for us to recognize. It is not -so difficult, however, if there are great numbers of possibles +so difficult, however, if there are great numbers of �possibles� available. A little later in time the tradition becomes more clearly set, and pebble tools are easier to recognize. So far, really large collections of pebble tools have only been found and examined in Africa. @@ -1475,9 +1475,9 @@ collections of pebble tools have only been found and examined in Africa. CORE-BIFACE TOOLS -The next tradition well look at is the _core_ or biface one. The tools +The next tradition we�ll look at is the _core_ or biface one. The tools are large pear-shaped pieces of stone trimmed flat on the two opposite -sides or faces. Hence biface has been used to describe these tools. +sides or �faces.� Hence �biface� has been used to describe these tools. The front view is like that of a pear with a rather pointed top, and the back view looks almost exactly the same. Look at them side on, and you can see that the front and back faces are the same and have been @@ -1488,7 +1488,7 @@ illustration. [Illustration: ABBEVILLIAN BIFACE] We have very little idea of the way in which these core-bifaces were -used. They have been called hand axes, but this probably gives the +used. They have been called �hand axes,� but this probably gives the wrong idea, for an ax, to us, is not a pointed tool. All of these early tools must have been used for a number of jobs--chopping, scraping, cutting, hitting, picking, and prying. Since the core-bifaces tend to @@ -1505,7 +1505,7 @@ a big block of stone. You had to break off the flake in such a way that it was broad and thin, and also had a good sharp cutting edge. Once you really got on to the trick of doing it, this was probably a simpler way to make a good cutting tool than preparing a biface. 
You have to know -how, though; Ive tried it and have mashed my fingers more than once. +how, though; I�ve tried it and have mashed my fingers more than once. The flake tools look as if they were meant mainly for chopping, scraping, and cutting jobs. When one made a flake tool, the idea seems @@ -1535,9 +1535,9 @@ tradition. It probably has its earliest roots in the pebble tool tradition of African type. There are several kinds of tools in this tradition, but all differ from the western core-bifaces and flakes. There are broad, heavy scrapers or cleavers, and tools with an -adze-like cutting edge. These last-named tools are called hand adzes, -just as the core-bifaces of the west have often been called hand -axes. The section of an adze cutting edge is ? shaped; the section of +adze-like cutting edge. These last-named tools are called �hand adzes,� +just as the core-bifaces of the west have often been called �hand +axes.� The section of an adze cutting edge is ? shaped; the section of an ax is < shaped. [Illustration: ANYATHIAN ADZE-LIKE TOOL] @@ -1581,17 +1581,17 @@ stratification.[3] Soan (India) Flake: - Typical Mousterian + �Typical Mousterian� Levalloiso-Mousterian Levalloisian Tayacian Clactonian (localized in England) Core-biface: - Some blended elements in Mousterian + Some blended elements in �Mousterian� Micoquian (= Acheulean 6 and 7) Acheulean - Abbevillian (once called Chellean) + Abbevillian (once called �Chellean�) Pebble tool: Oldowan @@ -1608,8 +1608,8 @@ out of glacial gravels the easiest thing to do first is to isolate individual types of tools into groups. First you put a bushel-basketful of tools on a table and begin matching up types. Then you give names to the groups of each type. The groups and the types are really matters of -the archeologists choice; in real life, they were probably less exact -than the archeologists lists of them. 
We now know pretty well in which +the archeologists' choice; in real life, they were probably less exact +than the archeologists' lists of them. We now know pretty well in which of the early traditions the various early groups belong. @@ -1635,9 +1635,9 @@ production must have been passed on from one generation to another. I could even guess that the notions of the ideal type of one or the other of these tools stood out in the minds of men of those times -somewhat like a symbol of perfect tool for good job. If this were -so--remember its only a wild guess of mine--then men were already -symbol users. Now lets go on a further step to the fact that the words +somewhat like a symbol of "perfect tool for good job." If this were +so--remember it's only a wild guess of mine--then men were already +symbol users. Now let's go on a further step to the fact that the words men speak are simply sounds, each different sound being a symbol for a different meaning. If standardized tool-making suggests symbol-making, is it also possible that crude word-symbols were also being made? I @@ -1650,7 +1650,7 @@ of our second step is more suggestive, although we may not yet feel sure that many of the earlier pebble tools were man-made products. But with the step to standardization and the appearance of the traditions, I believe we must surely be dealing with the traces of culture-bearing -_men_. The conventional understandings which Professor Redfields +_men_. The "conventional understandings" which Professor Redfield's definition of culture suggests are now evidenced for us in the persistent habits for the preparation of stone tools. Were we able to see the other things these prehistoric men must have made--in materials @@ -1666,19 +1666,19 @@ In the last chapter, I told you that many of the older archeologists and human paleontologists used to think that modern man was very old. 
The supposed ages of Piltdown and Galley Hill were given as evidence of the great age of anatomically modern man, and some interpretations -of the Swanscombe and Fontchevade fossils were taken to support +of the Swanscombe and Fontéchevade fossils were taken to support this view. The conclusion was that there were two parallel lines or -phyla of men already present well back in the Pleistocene. The -first of these, the more primitive or paleoanthropic line, was +"phyla" of men already present well back in the Pleistocene. The +first of these, the more primitive or "paleoanthropic" line, was said to include Heidelberg, the proto-neanderthaloids and classic -Neanderthal. The more anatomically modern or neanthropic line was +Neanderthal. The more anatomically modern or "neanthropic" line was thought to consist of Piltdown and the others mentioned above. The Neanderthaler or paleoanthropic line was thought to have become extinct after the first phase of the last great glaciation. Of course, the modern or neanthropic line was believed to have persisted into the -present, as the basis for the worlds population today. But with +present, as the basis for the world's population today. But with Piltdown liquidated, Galley Hill known to be very late, and Swanscombe -and Fontchevade otherwise interpreted, there is little left of the +and Fontéchevade otherwise interpreted, there is little left of the so-called parallel phyla theory. While the theory was in vogue, however, and as long as the European @@ -1695,9 +1695,9 @@ where they had actually been dropped by the men who made and used them. The tools came, rather, from the secondary hodge-podge of the glacial gravels. I tried to give you a picture of the bulldozing action of glaciers (p. 40) and of the erosion and weathering that were -side-effects of a glacially conditioned climate on the earths surface. +side-effects of a glacially conditioned climate on the earth's surface. 
As we said above, if one simply plucks tools out of the redeposited -gravels, his natural tendency is to type the tools by groups, and to +gravels, his natural tendency is to "type" the tools by groups, and to think that the groups stand for something _on their own_. In 1906, M. Victor Commont actually made a rare find of what seems @@ -1705,15 +1705,15 @@ to have been a kind of workshop site, on a terrace above the Somme river in France. Here, Commont realized, flake tools appeared clearly in direct association with core-biface tools. Few prehistorians paid attention to Commont or his site, however. It was easier to believe -that flake tools represented a distinct culture and that this -culture was that of the Neanderthaler or paleoanthropic line, and -that the core-bifaces stood for another culture which was that of the +that flake tools represented a distinct "culture" and that this +"culture" was that of the Neanderthaler or paleoanthropic line, and +that the core-bifaces stood for another "culture" which was that of the supposed early modern or neanthropic line. Of course, I am obviously skipping many details here. Some later sites with Neanderthal fossils do seem to have only flake tools, but other such sites have both types of tools. The flake tools which appeared _with_ the core-bifaces in the Swanscombe gravels were never made much of, although it -was embarrassing for the parallel phyla people that Fontchevade +was embarrassing for the parallel phyla people that Fontéchevade ran heavily to flake tools. All in all, the parallel phyla theory flourished because it seemed so neat and easy to understand. @@ -1722,20 +1722,20 @@ TRADITIONS ARE TOOL-MAKING HABITS, NOT CULTURES In case you think I simply enjoy beating a dead horse, look in any standard book on prehistory written twenty (or even ten) years ago, or -in most encyclopedias. Youll find that each of the individual tool -types, of the West, at least, was supposed to represent a culture. 
-The cultures were believed to correspond to parallel lines of human +in most encyclopedias. You'll find that each of the individual tool +types, of the West, at least, was supposed to represent a "culture." +The "cultures" were believed to correspond to parallel lines of human evolution. In 1937, Mr. Harper Kelley strongly re-emphasized the importance -of Commonts workshop site and the presence of flake tools with -core-bifaces. Next followed Dr. Movius clear delineation of the +of Commont's workshop site and the presence of flake tools with +core-bifaces. Next followed Dr. Movius' clear delineation of the chopper-chopping tool tradition of the Far East. This spoiled the nice symmetry of the flake-tool = paleoanthropic, core-biface = neanthropic equations. Then came increasing understanding of the importance of the pebble tools in Africa, and the location of several more workshop sites there, especially at Olorgesailie in Kenya. Finally came the -liquidation of Piltdown and the deflation of Galley Hills date. So it +liquidation of Piltdown and the deflation of Galley Hill's date. So it is at last possible to picture an individual prehistoric man making a flake tool to do one job and a core-biface tool to do another. Commont showed us this picture in 1906, but few believed him. @@ -1751,7 +1751,7 @@ that of the cave on Mount Carmel in Palestine, where the blended pre-neanderthaloid, 70 per cent modern-type skulls were found. Here, in the same level with the skulls, were 9,784 flint tools. Of these, only three--doubtless strays--were core-bifaces; all the rest were flake -tools or flake chips. We noted above how the Fontchevade cave ran to +tools or flake chips. We noted above how the Fontéchevade cave ran to flake tools. The only conclusion I would draw from this is that times and circumstances did exist in which prehistoric men needed only flake tools. So they only made flake tools for those particular times and @@ -1773,13 +1773,13 @@ piece of bone. 
From the gravels which yield the Clactonian flakes of England comes the fire-hardened point of a wooden spear. There are also the chance finds of the fossil human bones themselves, of which we spoke in the last chapter. Aside from the cave of Peking man, none -of the earliest tools have been found in caves. Open air or workshop +of the earliest tools have been found in caves. Open air or "workshop" sites which do not seem to have been disturbed later by some geological agency are very rare. The chart on page 65 shows graphically what the situation in west-central Europe seems to have been. It is not yet certain whether -there were pebble tools there or not. The Fontchevade cave comes +there were pebble tools there or not. The Fontéchevade cave comes into the picture about 100,000 years ago or more. But for the earlier hundreds of thousands of years--below the red-dotted line on the chart--the tools we find come almost entirely from the haphazard @@ -1790,13 +1790,13 @@ kinds of all-purpose tools. Almost any one of them could be used for hacking, chopping, cutting, and scraping; so the men who used them must have been living in a rough and ready sort of way. They found or hunted their food wherever they could. In the anthropological jargon, they -were food-gatherers, pure and simple. +were "food-gatherers," pure and simple. Because of the mixture in the gravels and in the materials they -carried, we cant be sure which animals these men hunted. Bones of +carried, we can't be sure which animals these men hunted. Bones of the larger animals turn up in the gravels, but they could just as well belong to the animals who hunted the men, rather than the other -way about. We dont know. This is why camp sites like Commonts and +way about. We don't know. This is why camp sites like Commont's and Olorgesailie in Kenya are so important when we do find them. The animal bones at Olorgesailie belonged to various mammals of extremely large size. 
Probably they were taken in pit-traps, but there are a number of @@ -1809,18 +1809,18 @@ animal. Professor F. Clark Howell recently returned from excavating another important open air site at Isimila in Tanganyika. The site yielded the bones of many fossil animals and also thousands of core-bifaces, -flakes, and choppers. But Howells reconstruction of the food-getting -habits of the Isimila people certainly suggests that the word hunting -is too dignified for what they did; scavenging would be much nearer +flakes, and choppers. But Howell's reconstruction of the food-getting +habits of the Isimila people certainly suggests that the word "hunting" +is too dignified for what they did; "scavenging" would be much nearer the mark. During a great part of this time the climate was warm and pleasant. The second interglacial period (the time between the second and third great alpine glaciations) lasted a long time, and during much of this time -the climate may have been even better than ours is now. We dont know +the climate may have been even better than ours is now. We don't know that earlier prehistoric men in Europe or Africa lived in caves. They may not have needed to; much of the weather may have been so nice that -they lived in the open. Perhaps they didnt wear clothes, either. +they lived in the open. Perhaps they didn't wear clothes, either. WHAT THE PEKING CAVE-FINDS TELL US @@ -1832,7 +1832,7 @@ were bones of dangerous animals, members of the wolf, bear, and cat families. Some of the cat bones belonged to beasts larger than tigers. There were also bones of other wild animals: buffalo, camel, deer, elephants, horses, sheep, and even ostriches. Seventy per cent of the -animals Peking man killed were fallow deer. Its much too cold and dry +animals Peking man killed were fallow deer. It's much too cold and dry in north China for all these animals to live there today. 
So this list helps us know that the weather was reasonably warm, and that there was enough rain to grow grass for the grazing animals. The list also helps @@ -1840,7 +1840,7 @@ the paleontologists to date the find. Peking man also seems to have eaten plant food, for there are hackberry seeds in the debris of the cave. His tools were made of sandstone and -quartz and sometimes of a rather bad flint. As weve already seen, they +quartz and sometimes of a rather bad flint. As we've already seen, they belong in the chopper-tool tradition. It seems fairly clear that some of the edges were chipped by right-handed people. There are also many split pieces of heavy bone. Peking man probably split them so he could @@ -1850,10 +1850,10 @@ Many of these split bones were the bones of Peking men. Each one of the skulls had already had the base broken out of it. In no case were any of the bones resting together in their natural relation to one another. There is nothing like a burial; all of the bones are scattered. Now -its true that animals could have scattered bodies that were not cared +it's true that animals could have scattered bodies that were not cared for or buried. But splitting bones lengthwise and carefully removing the base of a skull call for both the tools and the people to use them. -Its pretty clear who the people were. Peking man was a cannibal. +It's pretty clear who the people were. Peking man was a cannibal. * * * * * @@ -1862,8 +1862,8 @@ prehistoric men. In those days life was rough. You evidently had to watch out not only for dangerous animals but also for your fellow men. You ate whatever you could catch or find growing. But you had sense enough to build fires, and you had already formed certain habits for -making the kinds of stone tools you needed. Thats about all we know. -But I think well have to admit that cultural beginnings had been made, +making the kinds of stone tools you needed. That's about all we know. 
+But I think we'll have to admit that cultural beginnings had been made, and that these early people were really _men_. @@ -1876,16 +1876,16 @@ MORE EVIDENCE of Culture While the dating is not yet sure, the material that we get from caves in Europe must go back to about 100,000 years ago; the time of the -classic Neanderthal group followed soon afterwards. We dont know why +classic Neanderthal group followed soon afterwards. We don't know why there is no earlier material in the caves; apparently they were not used before the last interglacial phase (the period just before the last great glaciation). We know that men of the classic Neanderthal group were living in caves from about 75,000 to 45,000 years ago. New radioactive carbon dates even suggest that some of the traces of -culture well describe in this chapter may have lasted to about 35,000 +culture we'll describe in this chapter may have lasted to about 35,000 years ago. Probably some of the pre-neanderthaloid types of men had also lived in caves. But we have so far found their bones in caves only -in Palestine and at Fontchevade. +in Palestine and at Fontéchevade. THE CAVE LAYERS @@ -1893,7 +1893,7 @@ In parts of France, some peasants still live in caves. In prehistoric time, many generations of people lived in them. As a result, many caves have deep layers of debris. The first people moved in and lived -on the rock floor. They threw on the floor whatever they didnt want, +on the rock floor. They threw on the floor whatever they didn't want, and they tracked in mud; nobody bothered to clean house in those days. Their debris--junk and mud and garbage and what not--became packed into a layer. As time went on, and generations passed, the layer grew @@ -1910,20 +1910,20 @@ earliest to latest. This is the _stratification_ we talked about (p. 
[Illustration: SECTION OF SHELTER ON LOWER TERRACE, LE MOUSTIER] -While we may find a mix-up in caves, its not nearly as bad as the +While we may find a mix-up in caves, it's not nearly as bad as the mixing up that was done by glaciers. The animal bones and shells, the fireplaces, the bones of men, and the tools the men made all belong -together, if they come from one layer. Thats the reason why the cave +together, if they come from one layer. That's the reason why the cave of Peking man is so important. It is also the reason why the caves in Europe and the Near East are so important. We can get an idea of which things belong together and which lot came earliest and which latest. In most cases, prehistoric men lived only in the mouths of caves. -They didnt like the dark inner chambers as places to live in. They +They didn't like the dark inner chambers as places to live in. They preferred rock-shelters, at the bases of overhanging cliffs, if there was enough overhang to give shelter. When the weather was good, they no -doubt lived in the open air as well. Ill go on using the term cave -since its more familiar, but remember that I really mean rock-shelter, +doubt lived in the open air as well. I'll go on using the term "cave" +since it's more familiar, but remember that I really mean rock-shelter, as a place in which people actually lived. The most important European cave sites are in Spain, France, and @@ -1933,29 +1933,29 @@ found when the out-of-the-way parts of Europe, Africa, and Asia are studied. -AN INDUSTRY DEFINED +AN "INDUSTRY" DEFINED We have already seen that the earliest European cave materials are -those from the cave of Fontchevade. Movius feels certain that the +those from the cave of Fontéchevade. 
Movius feels certain that the lowest materials here date back well into the third interglacial stage, -that which lay between the Riss (next to the last) and the Wrm I +that which lay between the Riss (next to the last) and the Würm I (first stage of the last) alpine glaciations. This material consists of an _industry_ of stone tools, apparently all made in the flake -tradition. This is the first time we have used the word industry. +tradition. This is the first time we have used the word "industry." It is useful to call all of the different tools found together in one layer and made of _one kind of material_ an industry; that is, the tools must be found together as men left them. Tools taken from the glacial gravels (or from windswept desert surfaces or river gravels -or any geological deposit) are not together in this sense. We might -say the latter have only geological, not archeological context. +or any geological deposit) are not "together" in this sense. We might +say the latter have only "geological," not "archeological" context. Archeological context means finding things just as men left them. We -can tell what tools go together in an industrial sense only if we +can tell what tools go together in an "industrial" sense only if we have archeological context. -Up to now, the only things we could have called industries were the +Up to now, the only things we could have called "industries" were the worked stone industry and perhaps the worked (?) bone industry of the Peking cave. We could add some of the very clear cases of open air -sites, like Olorgesailie. We couldnt use the term for the stone tools +sites, like Olorgesailie. We couldn't use the term for the stone tools from the glacial gravels, because we do not know which tools belonged together. But when the cave materials begin to appear in Europe, we can begin to speak of industries. Most of the European caves of this time @@ -1964,16 +1964,16 @@ contain industries of flint tools alone. 
THE EARLIEST EUROPEAN CAVE LAYERS -Weve just mentioned the industry from what is said to be the oldest +We've just mentioned the industry from what is said to be the oldest inhabited cave in Europe; that is, the industry from the deepest layer -of the site at Fontchevade. Apparently it doesnt amount to much. The +of the site at Fontéchevade. Apparently it doesn't amount to much. The tools are made of stone, in the flake tradition, and are very poorly worked. This industry is called _Tayacian_. Its type tool seems to be a smallish flake tool, but there are also larger flakes which seem to have been fashioned for hacking. In fact, the type tool seems to be simply a smaller edition of the Clactonian tool (pictured on p. 45). -None of the Fontchevade tools are really good. There are scrapers, +None of the Fontéchevade tools are really good. There are scrapers, and more or less pointed tools, and tools that may have been used for hacking and chopping. Many of the tools from the earlier glacial gravels are better made than those of this first industry we see in @@ -2005,7 +2005,7 @@ core-biface and the flake traditions. The core-biface tools usually make up less than half of all the tools in the industry. However, the name of the biface type of tool is generally given to the whole industry. It is called the _Acheulean_, actually a late form of it, as -Acheulean is also used for earlier core-biface tools taken from the +"Acheulean" is also used for earlier core-biface tools taken from the glacial gravels. In western Europe, the name used is _Upper Acheulean_ or _Micoquian_. The same terms have been borrowed to name layers E and F in the Tabun cave, on Mount Carmel in Palestine. @@ -2029,7 +2029,7 @@ those used for at least one of the flake industries we shall mention presently. There is very little else in these early cave layers. We do not have -a proper industry of bone tools. There are traces of fire, and of +a proper "industry" of bone tools. 
There are traces of fire, and of animal bones, and a few shells. In Palestine, there are many more bones of deer than of gazelle in these layers; the deer lives in a wetter climate than does the gazelle. In the European cave layers, the @@ -2043,18 +2043,18 @@ bones of fossil men definitely in place with this industry. FLAKE INDUSTRIES FROM THE CAVES Two more stone industries--the _Levalloisian_ and the -_Mousterian_--turn up at approximately the same time in the European +"_Mousterian_"--turn up at approximately the same time in the European cave layers. Their tools seem to be mainly in the flake tradition, but according to some of the authorities their preparation also shows some combination with the habits by which the core-biface tools were prepared. -Now notice that I dont tell you the Levalloisian and the Mousterian +Now notice that I don't tell you the Levalloisian and the "Mousterian" layers are both above the late Acheulean layers. Look at the cave -section (p. 57) and youll find that some Mousterian of Acheulean -tradition appears above some typical Mousterian. This means that +section (p. 57) and you'll find that some "Mousterian of Acheulean +tradition" appears above some "typical Mousterian." This means that there may be some kinds of Acheulean industries that are later than -some kinds of Mousterian. The same is true of the Levalloisian. +some kinds of "Mousterian." The same is true of the Levalloisian. There were now several different kinds of habits that men used in making stone tools. These habits were based on either one or the other @@ -2072,7 +2072,7 @@ were no patent laws in those days. The extremely complicated interrelationships of the different habits used by the tool-makers of this range of time are at last being -systematically studied. M. Franois Bordes has developed a statistical +systematically studied. M. François Bordes has developed a statistical method of great importance for understanding these tool preparation habits. 
@@ -2081,22 +2081,22 @@ THE LEVALLOISIAN AND MOUSTERIAN The easiest Levalloisian tool to spot is a big flake tool. The trick in making it was to fashion carefully a big chunk of stone (called -the Levalloisian tortoise core, because it resembles the shape of +the Levalloisian "tortoise core," because it resembles the shape of a turtle-shell) and then to whack this in such a way that a large flake flew off. This large thin flake, with sharp cutting edges, is the finished Levalloisian tool. There were various other tools in a Levalloisian industry, but this is the characteristic _Levalloisian_ tool. -There are several typical Mousterian stone tools. Different from -the tools of the Levalloisian type, these were made from disc-like -cores. There are medium-sized flake side scrapers. There are also -some small pointed tools and some small hand axes. The last of these +There are several "typical Mousterian" stone tools. Different from +the tools of the Levalloisian type, these were made from "disc-like +cores." There are medium-sized flake "side scrapers." There are also +some small pointed tools and some small "hand axes." The last of these tool types is often a flake worked on both of the flat sides (that is, bifacially). There are also pieces of flint worked into the form of crude balls. The pointed tools may have been fixed on shafts to make short jabbing spears; the round flint balls may have been used as -bolas. Actually, we dont _know_ what either tool was used for. The +bolas. Actually, we don't _know_ what either tool was used for. The points and side scrapers are illustrated (pp. 64 and 66). [Illustration: LEVALLOIS FLAKE] @@ -2108,9 +2108,9 @@ Nowadays the archeologists are less and less sure of the importance of any one specific tool type and name. Twenty years ago, they used to speak simply of Acheulean or Levalloisian or Mousterian tools. 
Now, more and more, _all_ of the tools from some one layer in a -cave are called an industry, which is given a mixed name. Thus we -have Levalloiso-Mousterian, and Acheuleo-Levalloisian, and even -Acheuleo-Mousterian (or Mousterian of Acheulean tradition). Bordes +cave are called an "industry," which is given a mixed name. Thus we +have "Levalloiso-Mousterian," and "Acheuleo-Levalloisian," and even +"Acheuleo-Mousterian" (or "Mousterian of Acheulean tradition"). Bordes' systematic work is beginning to clear up some of our confusion. The time of these late Acheuleo-Levalloiso-Mousterioid industries @@ -2120,16 +2120,16 @@ phase of the last great glaciation. It was also the time that the classic group of Neanderthal men was living in Europe. A number of the Neanderthal fossil finds come from these cave layers. Before the different habits of tool preparation were understood it used to be -popular to say Neanderthal man was Mousterian man. I think this is -wrong. What used to be called Mousterian is now known to be a variety +popular to say Neanderthal man was "Mousterian man." I think this is +wrong. What used to be called "Mousterian" is now known to be a variety of industries with tools of both core-biface and flake habits, and -so mixed that the word Mousterian used alone really doesnt mean +so mixed that the word "Mousterian" used alone really doesn't mean anything. The Neanderthalers doubtless understood the tool preparation habits by means of which Acheulean, Levalloisian and Mousterian type tools were produced. We also have the more modern-like Mount Carmel people, found in a cave layer of Palestine with tools almost entirely -in the flake tradition, called Levalloiso-Mousterian, and the -Fontchevade-Tayacian (p. 59). +in the flake tradition, called "Levalloiso-Mousterian," and the +Fontéchevade-Tayacian (p. 59). [Illustration: MOUSTERIAN POINT] @@ -2165,7 +2165,7 @@ which seem to have served as anvils or chopping blocks, are fairly common. 
Bits of mineral, used as coloring matter, have also been found. We -dont know what the color was used for. +don't know what the color was used for. [Illustration: MOUSTERIAN SIDE SCRAPER] @@ -2230,7 +2230,7 @@ might suggest some notion of hoarding up the spirits or the strength of bears killed in the hunt. Probably the people lived in small groups, as hunting and food-gathering seldom provide enough food for large groups of people. These groups probably had some kind of leader or -chief. Very likely the rude beginnings of rules for community life +"chief." Very likely the rude beginnings of rules for community life and politics, and even law, were being made. But what these were, we do not know. We can only guess about such things, as we can only guess about many others; for example, how the idea of a family must have been @@ -2246,8 +2246,8 @@ small. The mixtures and blendings of the habits used in making stone tools must mean that there were also mixtures and blends in many of the other ideas and beliefs of these small groups. And what this probably means is that there was no one _culture_ of the time. It is -certainly unlikely that there were simply three cultures, Acheulean, -Levalloisian, and Mousterian, as has been thought in the past. +certainly unlikely that there were simply three cultures, "Acheulean," +"Levalloisian," and "Mousterian," as has been thought in the past. Rather there must have been a great variety of loosely related cultures at about the same stage of advancement. We could say, too, that here we really begin to see, for the first time, that remarkable ability +As to what the men of the West looked like, I've already hinted at all we know so far (pp. 29 ff.). @@ -2272,7 +2272,7 @@ related habits for the making of tools. But the men who made them must have looked much like the men of the West. Their tools were different, but just as useful. -As to what the men of the West looked like, Ive already hinted at all +As to what the men of the West looked like, I've already hinted at all we know so far (pp. 29 ff.). 
The Neanderthalers were present at the time. Some more modern-like men must have been about, too, since fossils of them have turned up at Mount Carmel in Palestine, and at @@ -2306,7 +2306,7 @@ A NEW TRADITION APPEARS Something new was probably beginning to happen in the European-Mediterranean area about 40,000 years ago, though all the rest of the Old World seems to have been going on as it had been. I -cant be sure of this because the information we are using as a basis +can't be sure of this because the information we are using as a basis for dates is very inaccurate for the areas outside of Europe and the Mediterranean. @@ -2325,7 +2325,7 @@ drawing shows. It has sharp cutting edges, and makes a very useful knife. The real trick is to be able to make one. It is almost impossible to make a blade out of any stone but flint or a natural volcanic glass called obsidian. And even if you have flint or obsidian, -you first have to work up a special cone-shaped blade-core, from +you first have to work up a special cone-shaped "blade-core," from which to whack off blades. [Illustration: PLAIN BLADE] @@ -2351,8 +2351,8 @@ found in equally early cave levels in Syria; their popularity there seems to fluctuate a bit. Some more or less parallel-sided flakes are known in the Levalloisian industry in France, but they are probably no earlier than Tabun E. The Tabun blades are part of a local late -Acheulean industry, which is characterized by core-biface hand -axes, but which has many flake tools as well. Professor F. E. +"Acheulean" industry, which is characterized by core-biface "hand +axes," but which has many flake tools as well. Professor F. E. Zeuner believes that this industry may be more than 120,000 years old; actually its date has not yet been fixed, but it is very old--older than the fossil finds of modern-like men in the same caves. @@ -2371,7 +2371,7 @@ We are not sure just where the earliest _persisting_ habits for the production of blade tools developed. 
Impressed by the very early momentary appearance of blades at Tabun on Mount Carmel, Professor Dorothy A. Garrod first favored the Near East as a center of origin. -She spoke of some as yet unidentified Asiatic centre, which she +She spoke of "some as yet unidentified Asiatic centre," which she thought might be in the highlands of Iran or just beyond. But more recent work has been done in this area, especially by Professor Coon, and the blade tools do not seem to have an early appearance there. When @@ -2395,21 +2395,21 @@ core (and the striking of the Levalloisian flake from it) might have followed through to the conical core and punch technique for the production of blades. Professor Garrod is much impressed with the speed of change during the later phases of the last glaciation, and its -probable consequences. She speaks of the greater number of industries +probable consequences. She speaks of "the greater number of industries having enough individual character to be classified as distinct ... -since evolution now starts to outstrip diffusion. Her evolution here +since evolution now starts to outstrip diffusion." Her "evolution" here is of course an industrial evolution rather than a biological one. Certainly the people of Europe had begun to make blade tools during the warm spell after the first phase of the last glaciation. By about 40,000 years ago blades were well established. The bones of the blade -tool makers weve found so far indicate that anatomically modern men +tool makers we've found so far indicate that anatomically modern men had now certainly appeared. Unfortunately, only a few fossil men have so far been found from the very beginning of the blade tool range in Europe (or elsewhere). What I certainly shall _not_ tell you is that conquering bands of fine, strong, anatomically modern men, armed with superior blade tools, came sweeping out of the East to exterminate the -lowly Neanderthalers. 
Even if we dont know exactly what happened, Id -lay a good bet it wasnt that simple. +lowly Neanderthalers. Even if we don�t know exactly what happened, I�d +lay a good bet it wasn�t that simple. We do know a good deal about different blade industries in Europe. Almost all of them come from cave layers. There is a great deal of @@ -2418,7 +2418,7 @@ this complication; in fact, it doubtless simplifies it too much. But it may suggest all the complication of industries which is going on at this time. You will note that the upper portion of my much simpler chart (p. 65) covers the same material (in the section -marked Various Blade-Tool Industries). That chart is certainly too +marked �Various Blade-Tool Industries�). That chart is certainly too simplified. You will realize that all this complication comes not only from @@ -2429,7 +2429,7 @@ a good deal of climatic change at this time. The plants and animals that men used for food were changing, too. The great variety of tools and industries we now find reflect these changes and the ability of men to keep up with the times. Now, for example, is the first time we are -sure that there are tools to _make_ other tools. They also show mens +sure that there are tools to _make_ other tools. They also show men�s increasing ability to adapt themselves. @@ -2437,15 +2437,15 @@ SPECIAL TYPES OF BLADE TOOLS The most useful tools that appear at this time were made from blades. - 1. The backed blade. This is a knife made of a flint blade, with - one edge purposely blunted, probably to save the users fingers + 1. The �backed� blade. This is a knife made of a flint blade, with + one edge purposely blunted, probably to save the user�s fingers from being cut. There are several shapes of backed blades (p. 73). [Illustration: TWO BURINS] - 2. The _burin_ or graver. The burin was the original chisel. Its - cutting edge is _transverse_, like a chisels. Some burins are + 2. The _burin_ or �graver.� The burin was the original chisel. 
Its + cutting edge is _transverse_, like a chisel�s. Some burins are made like a screw-driver, save that burins are sharp. Others have edges more like the blade of a chisel or a push plane, with only one bevel. Burins were probably used to make slots in wood @@ -2456,29 +2456,29 @@ The most useful tools that appear at this time were made from blades. [Illustration: TANGED POINT] - 3. The tanged point. These stone points were used to tip arrows or + 3. The �tanged� point. These stone points were used to tip arrows or light spears. They were made from blades, and they had a long tang at the bottom where they were fixed to the shaft. At the place where the tang met the main body of the stone point, there was - a marked shoulder, the beginnings of a barb. Such points had + a marked �shoulder,� the beginnings of a barb. Such points had either one or two shoulders. [Illustration: NOTCHED BLADE] - 4. The notched or strangulated blade. Along with the points for + 4. The �notched� or �strangulated� blade. Along with the points for arrows or light spears must go a tool to prepare the arrow or - spear shaft. Today, such a tool would be called a draw-knife or - a spoke-shave, and this is what the notched blades probably are. + spear shaft. Today, such a tool would be called a �draw-knife� or + a �spoke-shave,� and this is what the notched blades probably are. Our spoke-shaves have sharp straight cutting blades and really - shave. Notched blades of flint probably scraped rather than cut. + �shave.� Notched blades of flint probably scraped rather than cut. - 5. The awl, drill, or borer. These blade tools are worked out + 5. The �awl,� �drill,� or �borer.� These blade tools are worked out to a spike-like point. They must have been used for making holes in wood, bone, shell, skin, or other things. [Illustration: DRILL OR AWL] - 6. The end-scraper on a blade is a tool with one or both ends + 6. 
The �end-scraper on a blade� is a tool with one or both ends worked so as to give a good scraping edge. It could have been used to hollow out wood or bone, scrape hides, remove bark from trees, and a number of other things (p. 78). @@ -2489,11 +2489,11 @@ usually made of blades, but the best examples are so carefully worked on both sides (bifacially) that it is impossible to see the original blade. This tool is - 7. The laurel leaf point. Some of these tools were long and + 7. The �laurel leaf� point. Some of these tools were long and dagger-like, and must have been used as knives or daggers. Others - were small, called willow leaf, and must have been mounted on + were small, called �willow leaf,� and must have been mounted on spear or arrow shafts. Another typical Solutrean tool is the - shouldered point. Both the laurel leaf and shouldered point + �shouldered� point. Both the �laurel leaf� and �shouldered� point types are illustrated (see above and p. 79). [Illustration: END-SCRAPER ON A BLADE] @@ -2507,17 +2507,17 @@ second is a core tool. [Illustration: SHOULDERED POINT] - 8. The keel-shaped round scraper is usually small and quite round, + 8. The �keel-shaped round scraper� is usually small and quite round, and has had chips removed up to a peak in the center. It is called - keel-shaped because it is supposed to look (when upside down) + �keel-shaped� because it is supposed to look (when upside down) like a section through a boat. Actually, it looks more like a tent or an umbrella. Its outer edges are sharp all the way around, and it was probably a general purpose scraping tool (see illustration, p. 81). - 9. The keel-shaped nosed scraper is a much larger and heavier tool + 9. The �keel-shaped nosed scraper� is a much larger and heavier tool than the round scraper. It was made on a core with a flat bottom, - and has one nicely worked end or nose. 
Such tools are usually + and has one nicely worked end or �nose.� Such tools are usually large enough to be easily grasped, and probably were used like push planes (see illustration, p. 81). @@ -2530,7 +2530,7 @@ the most easily recognized blade tools, although they show differences in detail at different times. There are also many other kinds. Not all of these tools appear in any one industry at one time. Thus the different industries shown in the chart (p. 72) each have only some -of the blade tools weve just listed, and also a few flake tools. Some +of the blade tools we�ve just listed, and also a few flake tools. Some industries even have a few core tools. The particular types of blade tools appearing in one cave layer or another, and the frequency of appearance of the different types, tell which industry we have in each @@ -2545,15 +2545,15 @@ to appear. There are knives, pins, needles with eyes, and little double-pointed straight bars of bone that were probably fish-hooks. The fish-line would have been fastened in the center of the bar; when the fish swallowed the bait, the bar would have caught cross-wise in the -fishs mouth. +fish�s mouth. One quite special kind of bone tool is a long flat point for a light spear. It has a deep notch cut up into the breadth of its base, and is -called a split-based bone point (p. 82). We know examples of bone +called a �split-based bone point� (p. 82). We know examples of bone beads from these times, and of bone handles for flint tools. Pierced teeth of some animals were worn as beads or pendants, but I am not sure -that elks teeth were worn this early. There are even spool-shaped -buttons or toggles. +that elks� teeth were worn this early. There are even spool-shaped +�buttons� or toggles. [Illustration: SPLIT-BASED BONE POINT] @@ -2595,12 +2595,12 @@ almost to have served as sketch blocks. The surfaces of these various objects may show animals, or rather abstract floral designs, or geometric designs. 
-[Illustration: VENUS FIGURINE FROM WILLENDORF] +[Illustration: �VENUS� FIGURINE FROM WILLENDORF] Some of the movable art is not done on tools. The most remarkable examples of this class are little figures of women. These women seem to be pregnant, and their most female characteristics are much emphasized. -It is thought that these Venus or Mother-goddess figurines may be +It is thought that these �Venus� or �Mother-goddess� figurines may be meant to show the great forces of nature--fertility and the birth of life. @@ -2616,21 +2616,21 @@ are different styles in the cave art. The really great cave art is pretty well restricted to southern France and Cantabrian (northwestern) Spain. -There are several interesting things about the Franco-Cantabrian cave +There are several interesting things about the �Franco-Cantabrian� cave art. It was done deep down in the darkest and most dangerous parts of the caves, although the men lived only in the openings of caves. If you think what they must have had for lights--crude lamps of hollowed stone have been found, which must have burned some kind of oil or grease, with a matted hair or fiber wick--and of the animals that may have -lurked in the caves, youll understand the part about danger. Then, -too, were sure the pictures these people painted were not simply to be +lurked in the caves, you�ll understand the part about danger. Then, +too, we�re sure the pictures these people painted were not simply to be looked at and admired, for they painted one picture right over other pictures which had been done earlier. Clearly, it was the _act_ of _painting_ that counted. The painter had to go way down into the most mysterious depths of the earth and create an animal in paint. Possibly he believed that by doing this he gained some sort of magic power over the same kind of animal when he hunted it in the open air. 
It certainly -doesnt look as if he cared very much about the picture he painted--as +doesn�t look as if he cared very much about the picture he painted--as a finished product to be admired--for he or somebody else soon went down and painted another animal right over the one he had done. @@ -2683,10 +2683,10 @@ it. Their art is another example of the direction the human mind was taking. And when I say human, I mean it in the fullest sense, for this is the time in which fully modern man has appeared. On page 34, we -spoke of the Cro-Magnon group and of the Combe Capelle-Brnn group of -Caucasoids and of the Grimaldi Negroids, who are no longer believed +spoke of the Cro-Magnon group and of the Combe Capelle-Br�nn group of +Caucasoids and of the Grimaldi �Negroids,� who are no longer believed to be Negroid. I doubt that any one of these groups produced most of -the achievements of the times. Its not yet absolutely sure which +the achievements of the times. It�s not yet absolutely sure which particular group produced the great cave art. The artists were almost certainly a blend of several (no doubt already mixed) groups. The pair of Grimaldians were buried in a grave with a sprinkling of red ochre, @@ -2705,9 +2705,9 @@ also found about the shore of the Mediterranean basin, and it moved into northern Europe as the last glaciation pulled northward. People began making blade tools of very small size. They learned how to chip very slender and tiny blades from a prepared core. Then they made these -little blades into tiny triangles, half-moons (lunates), trapezoids, +little blades into tiny triangles, half-moons (�lunates�), trapezoids, and several other geometric forms. These little tools are called -microliths. They are so small that most of them must have been fixed +�microliths.� They are so small that most of them must have been fixed in handles or shafts. 
[Illustration: MICROLITHS @@ -2726,7 +2726,7 @@ One corner of each little triangle stuck out, and the whole thing made a fine barbed harpoon. In historic times in Egypt, geometric trapezoidal microliths were still in use as arrowheads. They were fastened--broad end out--on the end of an arrow shaft. It seems queer -to give an arrow a point shaped like a T. Actually, the little points +to give an arrow a point shaped like a �T.� Actually, the little points were very sharp, and must have pierced the hides of animals very easily. We also think that the broader cutting edge of the point may have caused more bleeding than a pointed arrowhead would. In hunting @@ -2739,7 +2739,7 @@ is some evidence that they appear early in the Near East. Their use was very common in northwest Africa but this came later. The microlith makers who reached south Russia and central Europe possibly moved up out of the Near East. Or it may have been the other way around; we -simply dont yet know. +simply don�t yet know. Remember that the microliths we are talking about here were made from carefully prepared little blades, and are often geometric in outline. @@ -2749,7 +2749,7 @@ even some flake scrapers, in most microlithic industries. I emphasize this bladelet and the geometric character of the microlithic industries of the western Old World, since there has sometimes been confusion in the matter. Sometimes small flake chips, utilized as minute pointed -tools, have been called microliths. They may be _microlithic_ in size +tools, have been called �microliths.� They may be _microlithic_ in size in terms of the general meaning of the word, but they do not seem to belong to the sub-tradition of the blade tool preparation habits which we have been discussing here. @@ -2763,10 +2763,10 @@ in western Asia too, and early, although Professor Garrod is no longer sure that the whole tradition originated in the Near East. If you look again at my chart (p. 
72) you will note that in western Asia I list some of the names of the western European industries, but with the -qualification -like (for example, Gravettian-like). The western +qualification �-like� (for example, �Gravettian-like�). The western Asiatic blade-tool industries do vaguely recall some aspects of those of western Europe, but we would probably be better off if we used -completely local names for them. The Emiran of my chart is such an +completely local names for them. The �Emiran� of my chart is such an example; its industry includes a long spike-like blade point which has no western European counterpart. @@ -2774,13 +2774,13 @@ When we last spoke of Africa (p. 66), I told you that stone tools there were continuing in the Levalloisian flake tradition, and were becoming smaller. At some time during this process, two new tool types appeared in northern Africa: one was the Aterian point with -a tang (p. 67), and the other was a sort of laurel leaf point, -called the Sbaikian. These two tool types were both produced from +a tang (p. 67), and the other was a sort of �laurel leaf� point, +called the �Sbaikian.� These two tool types were both produced from flakes. The Sbaikian points, especially, are roughly similar to some of the Solutrean points of Europe. It has been suggested that both the Sbaikian and Aterian points may be seen on their way to France through their appearance in the Spanish cave deposits of Parpallo, but there is -also a rival pre-Solutrean in central Europe. We still do not know +also a rival �pre-Solutrean� in central Europe. We still do not know whether there was any contact between the makers of these north African tools and the Solutrean tool-makers. What does seem clear is that the blade-tool tradition itself arrived late in northern Africa. @@ -2788,11 +2788,11 @@ blade-tool tradition itself arrived late in northern Africa. 
NETHER AFRICA -Blade tools and laurel leaf points and some other probably late +Blade tools and �laurel leaf� points and some other probably late stone tool types also appear in central and southern Africa. There are geometric microliths on bladelets and even some coarse pottery in east Africa. There is as yet no good way of telling just where these -items belong in time; in broad geological terms they are late. +items belong in time; in broad geological terms they are �late.� Some people have guessed that they are as early as similar European and Near Eastern examples, but I doubt it. The makers of small-sized Levalloisian flake tools occupied much of Africa until very late in @@ -2823,18 +2823,18 @@ ancestors of the American Indians came from Asia. The stone-tool traditions of Europe, Africa, the Near and Middle East, and central Siberia, did _not_ move into the New World. With only a very few special or late exceptions, there are _no_ core-bifaces, -flakes, or blade tools of the Old World. Such things just havent been +flakes, or blade tools of the Old World. Such things just haven�t been found here. -This is why I say its a shame we dont know more of the end of the +This is why I say it�s a shame we don�t know more of the end of the chopper-tool tradition in the Far East. According to Weidenreich, the Mongoloids were in the Far East long before the end of the last glaciation. If the genetics of the blood group types do demand a non-Mongoloid ancestry for the American Indians, who else may have been in the Far East 25,000 years ago? We know a little about the habits for making stone tools which these first people brought with them, -and these habits dont conform with those of the western Old World. -Wed better keep our eyes open for whatever happened to the end of +and these habits don�t conform with those of the western Old World. 
+We�d better keep our eyes open for whatever happened to the end of the chopper-tool tradition in northern China; already there are hints that it lasted late there. Also we should watch future excavations in eastern Siberia. Perhaps we shall find the chopper-tool tradition @@ -2846,13 +2846,13 @@ THE NEW ERA Perhaps it comes in part from the way I read the evidence and perhaps in part it is only intuition, but I feel that the materials of this chapter suggest a new era in the ways of life. Before about 40,000 -years ago, people simply gathered their food, wandering over large +years ago, people simply �gathered� their food, wandering over large areas to scavenge or to hunt in a simple sort of way. But here we -have seen them settling-in more, perhaps restricting themselves in +have seen them �settling-in� more, perhaps restricting themselves in their wanderings and adapting themselves to a given locality in more intensive ways. This intensification might be suggested by the word -collecting. The ways of life we described in the earlier chapters -were food-gathering ways, but now an era of food-collecting has +�collecting.� The ways of life we described in the earlier chapters +were �food-gathering� ways, but now an era of �food-collecting� has begun. We shall see further intensifications of it in the next chapter. @@ -2883,8 +2883,8 @@ The last great glaciation of the Ice Age was a two-part affair, with a sub-phase at the end of the second part. In Europe the last sub-phase of this glaciation commenced somewhere around 15,000 years ago. Then the glaciers began to melt back, for the last time. Remember that -Professor Antevs (p. 19) isnt sure the Ice Age is over yet! This -melting sometimes went by fits and starts, and the weather wasnt +Professor Antevs (p. 19) isn�t sure the Ice Age is over yet! 
This +melting sometimes went by fits and starts, and the weather wasn�t always changing for the better; but there was at least one time when European weather was even better than it is now. @@ -2927,16 +2927,16 @@ Sweden. Much of this north European material comes from bogs and swamps where it had become water-logged and has kept very well. Thus we have much more complete _assemblages_[4] than for any time earlier. - [4] Assemblage is a useful word when there are different kinds of + [4] �Assemblage� is a useful word when there are different kinds of archeological materials belonging together, from one area and of - one time. An assemblage is made up of a number of industries + one time. An assemblage is made up of a number of �industries� (that is, all the tools in chipped stone, all the tools in bone, all the tools in wood, the traces of houses, etc.) and everything else that manages to survive, such as the art, the burials, the bones of the animals used as food, and the traces of plant foods; in fact, everything that has been left to us and can be used to help reconstruct the lives of the people to - whom it once belonged. Our own present-day assemblage would be + whom it once belonged. Our own present-day �assemblage� would be the sum total of all the objects in our mail-order catalogues, department stores and supply houses of every sort, our churches, our art galleries and other buildings, together with our roads, @@ -2976,7 +2976,7 @@ found. It seems likely that the Maglemosian bog finds are remains of summer camps, and that in winter the people moved to higher and drier regions. -Childe calls them the Forest folk; they probably lived much the +Childe calls them the �Forest folk�; they probably lived much the same sort of life as did our pre-agricultural Indians of the north central states. They hunted small game or deer; they did a great deal of fishing; they collected what plant food they could find. 
In fact, @@ -3010,7 +3010,7 @@ South of the north European belt the hunting-food-collecting peoples were living on as best they could during this time. One interesting group, which seems to have kept to the regions of sandy soil and scrub forest, made great quantities of geometric microliths. These are the -materials called _Tardenoisian_. The materials of the Forest folk of +materials called _Tardenoisian_. The materials of the �Forest folk� of France and central Europe generally are called _Azilian_; Dr. Movius believes the term might best be restricted to the area south of the Loire River. @@ -3032,24 +3032,24 @@ to it than this. Professor Mathiassen of Copenhagen, who knows the archeological remains of this time very well, poses a question. He speaks of the material -as being neither rich nor progressive, in fact rather stagnant, but -he goes on to add that the people had a certain receptiveness and +as being neither rich nor progressive, in fact �rather stagnant,� but +he goes on to add that the people had a certain �receptiveness� and were able to adapt themselves quickly when the next change did come. -My own understanding of the situation is that the Forest folk made +My own understanding of the situation is that the �Forest folk� made nothing as spectacular as had the producers of the earlier Magdalenian assemblage and the Franco-Cantabrian art. On the other hand, they _seem_ to have been making many more different kinds of tools for many more different kinds of tasks than had their Ice Age forerunners. I -emphasize seem because the preservation in the Maglemosian bogs +emphasize �seem� because the preservation in the Maglemosian bogs is very complete; certainly we cannot list anywhere near as many different things for earlier times as we did for the Maglemosians (p. 94). I believe this experimentation with all kinds of new tools and gadgets, this intensification of adaptiveness (p. 
91), this -receptiveness, even if it is still only pointed toward hunting, +�receptiveness,� even if it is still only pointed toward hunting, fishing, and food-collecting, is an important thing. Remember that the only marker we have handy for the _beginning_ of -this tendency toward receptiveness and experimentation is the +this tendency toward �receptiveness� and experimentation is the little microlithic blade tools of various geometric forms. These, we saw, began before the last ice had melted away, and they lasted on in use for a very long time. I wish there were a better marker than @@ -3063,7 +3063,7 @@ CHANGES IN OTHER AREAS? All this last section was about Europe. How about the rest of the world when the last glaciers were melting away? -We simply dont know much about this particular time in other parts +We simply don�t know much about this particular time in other parts of the world except in Europe, the Mediterranean basin and the Middle East. People were certainly continuing to move into the New World by way of Siberia and the Bering Strait about this time. But for the @@ -3075,10 +3075,10 @@ clear information. REAL CHANGE AND PRELUDE IN THE NEAR EAST The appearance of the microliths and the developments made by the -Forest folk of northwestern Europe also mark an end. They show us +�Forest folk� of northwestern Europe also mark an end. They show us the terminal phase of the old food-collecting way of life. It grows increasingly clear that at about the same time that the Maglemosian and -other Forest folk were adapting themselves to hunting, fishing, and +other �Forest folk� were adapting themselves to hunting, fishing, and collecting in new ways to fit the post-glacial environment, something completely new was being made ready in western Asia. @@ -3098,7 +3098,7 @@ simply gathering or collecting it. When their food-production became reasonably effective, people could and did settle down in village-farming communities. 
With the appearance of the little farming villages, a new way of life was actually under way. Professor Childe -has good reason to speak of the food-producing revolution, for it was +has good reason to speak of the �food-producing revolution,� for it was indeed a revolution. @@ -3117,8 +3117,8 @@ before the _how_ and _why_ answers begin to appear. Anthropologically trained archeologists are fascinated with the cultures of men in times of great change. About ten or twelve thousand years ago, the general level of culture in many parts of the world seems to have been ready -for change. In northwestern Europe, we saw that cultures changed -just enough so that they would not have to change. We linked this to +for change. In northwestern Europe, we saw that cultures �changed +just enough so that they would not have to change.� We linked this to environmental changes with the coming of post-glacial times. In western Asia, we archeologists can prove that the food-producing @@ -3155,7 +3155,7 @@ living as the Maglemosians did? These are the questions we still have to face. -CULTURAL RECEPTIVENESS AND PROMISING ENVIRONMENTS +CULTURAL �RECEPTIVENESS� AND PROMISING ENVIRONMENTS Until the archeologists and the natural scientists--botanists, geologists, zoologists, and general ecologists--have spent many more @@ -3163,15 +3163,15 @@ years on the problem, we shall not have full _how_ and _why_ answers. I do think, however, that we are beginning to understand what to look for. We shall have to learn much more of what makes the cultures of men -receptive and experimental. Did change in the environment alone -force it? Was it simply a case of Professor Toynbees challenge and -response? I cannot believe the answer is quite that simple. Were it -so simple, we should want to know why the change hadnt come earlier, +�receptive� and experimental. Did change in the environment alone +force it? 
Was it simply a case of Professor Toynbee�s �challenge and +response?� I cannot believe the answer is quite that simple. Were it +so simple, we should want to know why the change hadn�t come earlier, along with earlier environmental changes. We shall not know the answer, however, until we have excavated the traces of many more cultures of the time in question. We shall doubtless also have to learn more about, and think imaginatively about, the simpler cultures still left today. -The mechanics of culture in general will be bound to interest us. +The �mechanics� of culture in general will be bound to interest us. It will also be necessary to learn much more of the environments of 10,000 to 12,000 years ago. In which regions of the world were the @@ -3228,7 +3228,7 @@ THE OLD THEORY TOO SIMPLE FOR THE FACTS This theory was set up before we really knew anything in detail about the later prehistory of the Near and Middle East. We now know that -the facts which have been found dont fit the old theory at all well. +the facts which have been found don�t fit the old theory at all well. Also, I have yet to find an American meteorologist who feels that we know enough about the changes in the weather pattern to say that it can have been so simple and direct. And, of course, the glacial ice which @@ -3238,7 +3238,7 @@ of great alpine glaciers, and long periods of warm weather in between. If the rain belt moved north as the glaciers melted for the last time, it must have moved in the same direction in earlier times. Thus, the forced neighborliness of men, plants, and animals in river valleys and -oases must also have happened earlier. Why didnt domestication happen +oases must also have happened earlier. Why didn�t domestication happen earlier, then? Furthermore, it does not seem to be in the oases and river valleys @@ -3275,20 +3275,20 @@ archeologists, probably through habit, favor an old scheme of Grecized names for the subdivisions: paleolithic, mesolithic, neolithic. 
I refuse to use these words myself. They have meant too many different things to too many different people and have tended to hide some pretty -fuzzy thinking. Probably you havent even noticed my own scheme of -subdivision up to now, but Id better tell you in general what it is. +fuzzy thinking. Probably you haven�t even noticed my own scheme of +subdivision up to now, but I�d better tell you in general what it is. I think of the earliest great group of archeological materials, from which we can deduce only a food-gathering way of culture, as the -_food-gathering stage_. I say stage rather than age, because it +_food-gathering stage_. I say �stage� rather than �age,� because it is not quite over yet; there are still a few primitive people in out-of-the-way parts of the world who remain in the _food-gathering stage_. In fact, Professor Julian Steward would probably prefer to call it a food-gathering _level_ of existence, rather than a stage. This would be perfectly acceptable to me. I also tend to find myself using _collecting_, rather than _gathering_, for the more recent aspects or -era of the stage, as the word collecting appears to have more sense -of purposefulness and specialization than does gathering (see p. +era of the stage, as the word �collecting� appears to have more sense +of purposefulness and specialization than does �gathering� (see p. 91). Now, while I think we could make several possible subdivisions of the @@ -3297,22 +3297,22 @@ believe the only one which means much to us here is the last or _terminal sub-era of food-collecting_ of the whole food-gathering stage. The microliths seem to mark its approach in the northwestern part of the Old World. It is really shown best in the Old World by -the materials of the Forest folk, the cultural adaptation to the +the materials of the �Forest folk,� the cultural adaptation to the post-glacial environment in northwestern Europe. 
We talked about -the Forest folk at the beginning of this chapter, and I used the +the �Forest folk� at the beginning of this chapter, and I used the Maglemosian assemblage of Denmark as an example. [5] It is difficult to find words which have a sequence or gradation of meaning with respect to both development and a range of time in the past, or with a range of time from somewhere in the past which is perhaps not yet ended. One standard Webster definition - of _stage_ is: One of the steps into which the material - development of man ... is divided. I cannot find any dictionary + of _stage_ is: �One of the steps into which the material + development of man ... is divided.� I cannot find any dictionary definition that suggests which of the words, _stage_ or _era_, has the meaning of a longer span of time. Therefore, I have chosen to let my eras be shorter, and to subdivide my stages - into eras. Webster gives _era_ as: A signal stage of history, - an epoch. When I want to subdivide my eras, I find myself using + into eras. Webster gives _era_ as: �A signal stage of history, + an epoch.� When I want to subdivide my eras, I find myself using _sub-eras_. Thus I speak of the _eras_ within a _stage_ and of the _sub-eras_ within an _era_; that is, I do so when I feel that I really have to, and when the evidence is clear enough to @@ -3328,9 +3328,9 @@ realms of culture. It is rather that for most of prehistoric time the materials left to the archeologists tend to limit our deductions to technology and economics. -Im so soon out of my competence, as conventional ancient history +I�m so soon out of my competence, as conventional ancient history begins, that I shall only suggest the earlier eras of the -food-producing stage to you. This book is about prehistory, and Im not +food-producing stage to you. This book is about prehistory, and I�m not a universal historian. 

@@ -3339,28 +3339,28 @@ THE TWO EARLIEST ERAS OF THE FOOD-PRODUCING STAGE

The food-producing stage seems to appear in western Asia with really
revolutionary suddenness. It is seen by the relative speed with which
the traces of new crafts appear in the earliest village-farming
-community sites weve dug. It is seen by the spread and multiplication
+community sites we've dug. It is seen by the spread and multiplication
of these sites themselves, and the remarkable growth in human
-population we deduce from this increase in sites. Well look at some
+population we deduce from this increase in sites. We'll look at some
of these sites and the archeological traces they yield in the next
chapter. When such village sites begin to appear, I believe we are in
the _era of the primary village-farming community_. I also believe this
is the second era of the food-producing stage.

The first era of the food-producing stage, I believe, was an _era of
-incipient cultivation and animal domestication_. I keep saying I
-believe because the actual evidence for this earlier era is so slight
+incipient cultivation and animal domestication_. I keep saying "I
+believe" because the actual evidence for this earlier era is so slight
that one has to set it up mainly by playing a hunch for it. The reason
for playing the hunch goes about as follows.

One thing we seem to be able to see, in the food-collecting era in
general, is a tendency for people to begin to settle down. This
settling down seemed to become further intensified in the terminal
-era. How this is connected with Professor Mathiassens receptiveness
+era. How this is connected with Professor Mathiassen's "receptiveness"
and the tendency to be experimental, we do not exactly know. The
evidence from the New World comes into play here as well as that from
the Old World.
With this settling down in one place, the people of the -terminal era--especially the Forest folk whom we know best--began +terminal era--especially the �Forest folk� whom we know best--began making a great variety of new things. I remarked about this earlier in the chapter. Dr. Robert M. Adams is of the opinion that this atmosphere of experimentation with new tools--with new ways of collecting food--is @@ -3368,9 +3368,9 @@ the kind of atmosphere in which one might expect trials at planting and at animal domestication to have been made. We first begin to find traces of more permanent life in outdoor camp sites, although caves were still inhabited at the beginning of the terminal era. It is not -surprising at all that the Forest folk had already domesticated the +surprising at all that the �Forest folk� had already domesticated the dog. In this sense, the whole era of food-collecting was becoming ready -and almost incipient for cultivation and animal domestication. +and almost �incipient� for cultivation and animal domestication. Northwestern Europe was not the place for really effective beginnings in agriculture and animal domestication. These would have had to take @@ -3425,13 +3425,13 @@ zone which surrounds the drainage basin of the Tigris and Euphrates Rivers at elevations of from approximately 2,000 to 5,000 feet. The lower alluvial land of the Tigris-Euphrates basin itself has very little rainfall. Some years ago Professor James Henry Breasted called -the alluvial lands of the Tigris-Euphrates a part of the fertile -crescent. These alluvial lands are very fertile if irrigated. Breasted +the alluvial lands of the Tigris-Euphrates a part of the �fertile +crescent.� These alluvial lands are very fertile if irrigated. Breasted was most interested in the oriental civilizations of conventional ancient history, and irrigation had been discovered before they appeared. 
-The country of hilly flanks above Breasteds crescent receives from +The country of hilly flanks above Breasted�s crescent receives from 10 to 20 or more inches of winter rainfall each year, which is about what Kansas has. Above the hilly-flanks zone tower the peaks and ridges of the Lebanon-Amanus chain bordering the coast-line from Palestine @@ -3440,7 +3440,7 @@ range of the Iraq-Iran borderland. This rugged mountain frame for our hilly-flanks zone rises to some magnificent alpine scenery, with peaks of from ten to fifteen thousand feet in elevation. There are several gaps in the Mediterranean coastal portion of the frame, through which -the winters rain-bearing winds from the sea may break so as to carry +the winter�s rain-bearing winds from the sea may break so as to carry rain to the foothills of the Taurus and the Zagros. The picture I hope you will have from this description is that of an @@ -3482,7 +3482,7 @@ hilly-flanks zone in their wild state. With a single exception--that of the dog--the earliest positive evidence of domestication includes the two forms of wheat, the barley, and the goat. The evidence comes from within the hilly-flanks zone. -However, it comes from a settled village proper, Jarmo (which Ill +However, it comes from a settled village proper, Jarmo (which I�ll describe in the next chapter), and is thus from the era of the primary village-farming community. We are still without positive evidence of domesticated grain and animals in the first era of the food-producing @@ -3534,9 +3534,9 @@ and the spread of ideas of people who had passed on into one of the more developed eras. In many cases, the terminal era of food-collecting was ended by the incoming of the food-producing peoples themselves. 
For example, the practices of food-production were carried into Europe -by the actual movement of some numbers of peoples (we dont know how +by the actual movement of some numbers of peoples (we don�t know how many) who had reached at least the level of the primary village-farming -community. The Forest folk learned food-production from them. There +community. The �Forest folk� learned food-production from them. There was never an era of incipient cultivation and domestication proper in Europe, if my hunch is right. @@ -3547,16 +3547,16 @@ The way I see it, two things were required in order that an era of incipient cultivation and domestication could begin. First, there had to be the natural environment of a nuclear area, with its whole group of plants and animals capable of domestication. This is the aspect of -the matter which weve said is directly given by nature. But it is +the matter which we�ve said is directly given by nature. But it is quite possible that such an environment with such a group of plants and animals in it may have existed well before ten thousand years ago in the Near East. It is also quite possible that the same promising condition may have existed in regions which never developed into nuclear areas proper. Here, again, we come back to the cultural factor. -I think it was that atmosphere of experimentation weve talked about -once or twice before. I cant define it for you, other than to say that +I think it was that �atmosphere of experimentation� we�ve talked about +once or twice before. I can�t define it for you, other than to say that by the end of the Ice Age, the general level of many cultures was ready -for change. Ask me how and why this was so, and Ill tell you we dont +for change. Ask me how and why this was so, and I�ll tell you we don�t know yet, and that if we did understand this kind of question, there would be no need for me to go on being a prehistorian! 
@@ -3590,7 +3590,7 @@ such collections for the modern wild forms of animals and plants from some of our nuclear areas. In the nuclear area in the Near East, some of the wild animals, at least, have already become extinct. There are no longer wild cattle or wild horses in western Asia. We know they were -there from the finds weve made in caves of late Ice Age times, and +there from the finds we�ve made in caves of late Ice Age times, and from some slightly later sites. @@ -3601,7 +3601,7 @@ incipient era of cultivation and animal domestication. I am closing this chapter with descriptions of two of the best Near Eastern examples I know of. You may not be satisfied that what I am able to describe makes a full-bodied era of development at all. Remember, however, that -Ive told you Im largely playing a kind of a hunch, and also that the +I�ve told you I�m largely playing a kind of a hunch, and also that the archeological materials of this era will always be extremely difficult to interpret. At the beginning of any new way of life, there will be a great tendency for people to make-do, at first, with tools and habits @@ -3613,7 +3613,7 @@ THE NATUFIAN, AN ASSEMBLAGE OF THE INCIPIENT ERA The assemblage called the Natufian comes from the upper layers of a number of caves in Palestine. Traces of its flint industry have also -turned up in Syria and Lebanon. We dont know just how old it is. I +turned up in Syria and Lebanon. We don�t know just how old it is. I guess that it probably falls within five hundred years either way of about 5000 B.C. @@ -3662,7 +3662,7 @@ pendants. There were also beads and pendants of pierced teeth and shell. A number of Natufian burials have been found in the caves; some burials were grouped together in one grave. 
The people who were buried within the Mount Carmel cave were laid on their backs in an extended position, -while those on the terrace seem to have been flexed (placed in their +while those on the terrace seem to have been �flexed� (placed in their graves in a curled-up position). This may mean no more than that it was easier to dig a long hole in cave dirt than in the hard-packed dirt of the terrace. The people often had some kind of object buried with them, @@ -3679,7 +3679,7 @@ beads. GROUND STONE BONE] -The animal bones of the Natufian layers show beasts of a modern type, +The animal bones of the Natufian layers show beasts of a �modern� type, but with some differences from those of present-day Palestine. The bones of the gazelle far outnumber those of the deer; since gazelles like a much drier climate than deer, Palestine must then have had much @@ -3692,9 +3692,9 @@ Maglemosian of northern Europe. More recently, it has been reported that a domesticated goat is also part of the Natufian finds. The study of the human bones from the Natufian burials is not yet -complete. Until Professor McCowns study becomes available, we may note -Professor Coons assessment that these people were of a basically -Mediterranean type. +complete. Until Professor McCown�s study becomes available, we may note +Professor Coon�s assessment that these people were of a �basically +Mediterranean type.� THE KARIM SHAHIR ASSEMBLAGE @@ -3704,11 +3704,11 @@ of a temporary open site or encampment. It lies on the top of a bluff in the Kurdish hill-country of northeastern Iraq. It was dug by Dr. Bruce Howe of the expedition I directed in 1950-51 for the Oriental Institute and the American Schools of Oriental Research. In 1954-55, -our expedition located another site, Mlefaat, with general resemblance +our expedition located another site, M�lefaat, with general resemblance to Karim Shahir, but about a hundred miles north of it. In 1956, Dr. 
Ralph Solecki located still another Karim Shahir type of site called Zawi Chemi Shanidar. The Zawi Chemi site has a radiocarbon date of 8900 - 300 B.C. +� 300 B.C. Karim Shahir has evidence of only one very shallow level of occupation. It was probably not lived on very long, although the people who lived @@ -3717,7 +3717,7 @@ layer yielded great numbers of fist-sized cracked pieces of limestone, which had been carried up from the bed of a stream at the bottom of the bluff. We think these cracked stones had something to do with a kind of architecture, but we were unable to find positive traces of hut plans. -At Mlefaat and Zawi Chemi, there were traces of rounded hut plans. +At M�lefaat and Zawi Chemi, there were traces of rounded hut plans. As in the Natufian, the great bulk of small objects of the Karim Shahir assemblage was in chipped flint. A large proportion of the flint tools @@ -3737,7 +3737,7 @@ clay figurines which seemed to be of animal form. UNBAKED CLAY SHELL BONE - ARCHITECTURE] + �ARCHITECTURE�] Karim Shahir did not yield direct evidence of the kind of vegetable food its people ate. The animal bones showed a considerable @@ -3746,7 +3746,7 @@ domestication--sheep, goat, cattle, horse, dog--as compared with animal bones from the earlier cave sites of the area, which have a high proportion of bones of wild forms like deer and gazelle. But we do not know that any of the Karim Shahir animals were actually domesticated. -Some of them may have been, in an incipient way, but we have no means +Some of them may have been, in an �incipient� way, but we have no means at the moment that will tell us from the bones alone. @@ -3761,7 +3761,7 @@ goat, and the general animal situation at Karim Shahir to hint at an incipient approach to food-production. At Karim Shahir, there was the tendency to settle down out in the open; this is echoed by the new reports of open air Natufian sites. 
The large number of cracked stones -certainly indicates that it was worth the peoples while to have some +certainly indicates that it was worth the peoples� while to have some kind of structure, even if the site as a whole was short-lived. It is a part of my hunch that these things all point toward @@ -3771,13 +3771,13 @@ which we shall look at next, are fully food-producing, the Natufian and Karim Shahir folk had not yet arrived. I think they were part of a general build-up to full scale food-production. They were possibly controlling a few animals of several kinds and perhaps one or two -plants, without realizing the full possibilities of this control as a +plants, without realizing the full possibilities of this �control� as a new way of life. This is why I think of the Karim Shahir and Natufian folk as being at a level, or in an era, of incipient cultivation and domestication. But we shall have to do a great deal more excavation in this range of time -before well get the kind of positive information we need. +before we�ll get the kind of positive information we need. SUMMARY @@ -3798,7 +3798,7 @@ history. We know the earliest village-farming communities appeared in western Asia, in a nuclear area. We do not yet know why the Near Eastern -experiment came first, or why it didnt happen earlier in some other +experiment came first, or why it didn�t happen earlier in some other nuclear area. Apparently, the level of culture and the promise of the natural environment were ready first in western Asia. The next sites we look at will show a simple but effective food-production already @@ -3835,7 +3835,7 @@ contrast between food-collecting and food-producing as ways of life. THE DIFFERENCE BETWEEN FOOD-COLLECTORS AND FOOD-PRODUCERS -Childe used the word revolution because of the radical change that +Childe used the word �revolution� because of the radical change that took place in the habits and customs of man. 
Food-collectors--that is,
hunters, fishers, berry- and nut-gatherers--had to live in small groups
or bands, for they had to be ready to move wherever their food supply
@@ -3851,7 +3851,7 @@ for clothing beyond the tools that were probably used to dress the
skins of animals; no time to think of much of anything but food and
protection and disposal of the dead when death did come: an existence
which takes nature as it finds it, which does little or nothing to
-modify nature--all in all, a savages existence, and a very tough one.
+modify nature--all in all, a savage's existence, and a very tough one.
A man who spends his whole life following animals just to kill them to
eat, or moving from one berry patch to another, is really living just
like an animal himself.
@@ -3859,10 +3859,10 @@ like an animal himself.

THE FOOD-PRODUCING ECONOMY

-Against this picture let me try to draw another--that of mans life
-after food-production had begun. His meat was stored on the hoof,
+Against this picture let me try to draw another--that of man's life
+after food-production had begun. His meat was stored "on the hoof,"
his grain in silos or great pottery jars. He lived in a house: it was
-worth his while to build one, because he couldnt move far from his
+worth his while to build one, because he couldn't move far from his
fields and flocks. In his neighborhood enough food could be grown and
enough animals bred so that many people were kept busy. They all lived
close to their flocks and fields, in a village. The village was
@@ -3872,7 +3872,7 @@ Children and old men could shepherd the animals by day or help with the
lighter work in the fields. After the crops had been harvested the
younger men might go hunting and some of them would fish, but the food
they brought in was only an addition to the food in the village; the
-villagers wouldnt starve, even if the hunters and fishermen came home
+villagers wouldn't starve, even if the hunters and fishermen came home
empty-handed.
There was more time to do different things, too. They began to modify @@ -3885,23 +3885,23 @@ people in the village who were becoming full-time craftsmen. Other things were changing, too. The villagers must have had to agree on new rules for living together. The head man of the village had problems different from those of the chief of the small -food-collectors band. If somebodys flock of sheep spoiled a wheat +food-collectors� band. If somebody�s flock of sheep spoiled a wheat field, the owner wanted payment for the grain he lost. The chief of the hunters was never bothered with such questions. Even the gods had changed. The spirits and the magic that had been used by hunters -werent of any use to the villagers. They needed gods who would watch +weren�t of any use to the villagers. They needed gods who would watch over the fields and the flocks, and they eventually began to erect buildings where their gods might dwell, and where the men who knew most about the gods might live. -WAS FOOD-PRODUCTION A REVOLUTION? +WAS FOOD-PRODUCTION A �REVOLUTION�? If you can see the difference between these two pictures--between life in the food-collecting stage and life after food-production -had begun--youll see why Professor Childe speaks of a revolution. -By revolution, he doesnt mean that it happened over night or that -it happened only once. We dont know exactly how long it took. Some +had begun--you�ll see why Professor Childe speaks of a revolution. +By revolution, he doesn�t mean that it happened over night or that +it happened only once. We don�t know exactly how long it took. Some people think that all these changes may have occurred in less than 500 years, but I doubt that. The incipient era was probably an affair of some duration. Once the level of the village-farming community had @@ -3915,7 +3915,7 @@ been achieved with truly revolutionary suddenness. GAPS IN OUR KNOWLEDGE OF THE NEAR EAST -If youll look again at the chart (p. 
111) youll see that I have +If you�ll look again at the chart (p. 111) you�ll see that I have very few sites and assemblages to name in the incipient era of cultivation and domestication, and not many in the earlier part of the primary village-farming level either. Thanks in no small part @@ -3926,20 +3926,20 @@ yard-stick here. But I am far from being able to show you a series of Sears Roebuck catalogues, even century by century, for any part of the nuclear area. There is still a great deal of earth to move, and a great mass of material to recover and interpret before we even begin to -understand how and why. +understand �how� and �why.� Perhaps here, because this kind of archeology is really my specialty, -youll excuse it if I become personal for a moment. I very much look +you�ll excuse it if I become personal for a moment. I very much look forward to having further part in closing some of the gaps in knowledge -of the Near East. This is not, as Ive told you, the spectacular +of the Near East. This is not, as I�ve told you, the spectacular range of Near Eastern archeology. There are no royal tombs, no gold, no great buildings or sculpture, no writing, in fact nothing to excite the normal museum at all. Nevertheless it is a range which, idea-wise, gives the archeologist tremendous satisfaction. The country of the hilly flanks is an exciting combination of green grasslands and mountainous ridges. The Kurds, who inhabit the part of the area -in which Ive worked most recently, are an extremely interesting and -hospitable people. Archeologists dont become rich, but Ill forego +in which I�ve worked most recently, are an extremely interesting and +hospitable people. Archeologists don�t become rich, but I�ll forego the Cadillac for any bright spring morning in the Kurdish hills, on a good site with a happy crew of workmen and an interested and efficient staff. 
It is probably impossible to convey the full feeling which life @@ -3965,15 +3965,15 @@ like the use of pottery borrowed from the more developed era of the same time in the nuclear area. The same general explanation doubtless holds true for certain materials in Egypt, along the upper Nile and in the Kharga oasis: these materials, called Sebilian III, the Khartoum -neolithic, and the Khargan microlithic, are from surface sites, +�neolithic,� and the Khargan microlithic, are from surface sites, not from caves. The chart (p. 111) shows where I would place these materials in era and time. [Illustration: THE HILLY FLANKS OF THE CRESCENT AND EARLY SITES OF THE NEAR EAST] -Both Mlefaat and Dr. Soleckis Zawi Chemi Shanidar site appear to have -been slightly more settled in than was Karim Shahir itself. But I do +Both M�lefaat and Dr. Solecki�s Zawi Chemi Shanidar site appear to have +been slightly more �settled in� than was Karim Shahir itself. But I do not think they belong to the era of farming-villages proper. The first site of this era, in the hills of Iraqi Kurdistan, is Jarmo, on which we have spent three seasons of work. Following Jarmo comes a variety of @@ -3989,9 +3989,9 @@ times when their various cultures flourished, there must have been many little villages which shared the same general assemblage. We are only now beginning to locate them again. Thus, if I speak of Jarmo, or Jericho, or Sialk as single examples of their particular kinds of -assemblages, I dont mean that they were unique at all. I think I could +assemblages, I don�t mean that they were unique at all. I think I could take you to the sites of at least three more Jarmos, within twenty -miles of the original one. They are there, but they simply havent yet +miles of the original one. They are there, but they simply haven�t yet been excavated. 
In 1956, a Danish expedition discovered material of Jarmo type at Shimshara, only two dozen miles northeast of Jarmo, and below an assemblage of Hassunan type (which I shall describe presently). @@ -4000,15 +4000,15 @@ below an assemblage of Hassunan type (which I shall describe presently). THE GAP BETWEEN KARIM SHAHIR AND JARMO As we see the matter now, there is probably still a gap in the -available archeological record between the Karim Shahir-Mlefaat-Zawi +available archeological record between the Karim Shahir-M�lefaat-Zawi Chemi group (of the incipient era) and that of Jarmo (of the village-farming era). Although some items of the Jarmo type materials do reflect the beginnings of traditions set in the Karim Shahir group (see p. 120), there is not a clear continuity. Moreover--to the degree that we may trust a few radiocarbon dates--there would appear to be around two thousand years of difference in time. The single -available Zawi Chemi date is 8900 300 B.C.; the most reasonable -group of dates from Jarmo average to about 6750 200 B.C. I am +available Zawi Chemi �date� is 8900 � 300 B.C.; the most reasonable +group of �dates� from Jarmo average to about 6750 � 200 B.C. I am uncertain about this two thousand years--I do not think it can have been so long. @@ -4021,7 +4021,7 @@ JARMO, IN THE KURDISH HILLS, IRAQ The site of Jarmo has a depth of deposit of about twenty-seven feet, and approximately a dozen layers of architectural renovation and -change. Nevertheless it is a one period site: its assemblage remains +change. Nevertheless it is a �one period� site: its assemblage remains essentially the same throughout, although one or two new items are added in later levels. It covers about four acres of the top of a bluff, below which runs a small stream. Jarmo lies in the hill country @@ -4078,7 +4078,7 @@ human beings in clay; one type of human figurine they favored was that of a markedly pregnant woman, probably the expression of some sort of fertility spirit. 
They provided their house floors with baked-in-place depressions, either as basins or hearths, and later with domed ovens of -clay. As weve noted, the houses themselves were of clay or mud; one +clay. As we�ve noted, the houses themselves were of clay or mud; one could almost say they were built up like a house-sized pot. Then, finally, the idea of making portable pottery itself appeared, although I very much doubt that the people of the Jarmo village discovered the @@ -4095,11 +4095,11 @@ over three hundred miles to the north. Already a bulk carrying trade had been established--the forerunner of commerce--and the routes were set by which, in later times, the metal trade was to move. -There are now twelve radioactive carbon dates from Jarmo. The most -reasonable cluster of determinations averages to about 6750 200 -B.C., although there is a completely unreasonable range of dates +There are now twelve radioactive carbon �dates� from Jarmo. The most +reasonable cluster of determinations averages to about 6750 � 200 +B.C., although there is a completely unreasonable range of �dates� running from 3250 to 9250 B.C.! _If_ I am right in what I take to be -reasonable, the first flush of the food-producing revolution had been +�reasonable,� the first flush of the food-producing revolution had been achieved almost nine thousand years ago. @@ -4117,7 +4117,7 @@ it, but the Hassunan sites seem to cluster at slightly lower elevations than those we have been talking about so far. The catalogue of the Hassuna assemblage is of course more full and -elaborate than that of Jarmo. The Iraqi governments archeologists +elaborate than that of Jarmo. The Iraqi government�s archeologists who dug Hassuna itself, exposed evidence of increasing architectural know-how. The walls of houses were still formed of puddled mud; sun-dried bricks appear only in later periods. There were now several @@ -4130,16 +4130,16 @@ largely disappeared by Hassunan times. 
The flint work of the Hassunan catalogue is, by and large, a wretched affair. We might guess that the kinaesthetic concentration of the Hassuna craftsmen now went into other categories; that is, they suddenly discovered they might have more fun -working with the newer materials. Its a shame, for example, that none +working with the newer materials. It�s a shame, for example, that none of their weaving is preserved for us. The two available radiocarbon determinations from Hassunan contexts -stand at about 5100 and 5600 B.C. 250 years. +stand at about 5100 and 5600 B.C. � 250 years. OTHER EARLY VILLAGE SITES IN THE NUCLEAR AREA -Ill now name and very briefly describe a few of the other early +I�ll now name and very briefly describe a few of the other early village assemblages either in or adjacent to the hilly flanks of the crescent. Unfortunately, we do not have radioactive carbon dates for many of these materials. We may guess that some particular assemblage, @@ -4177,7 +4177,7 @@ ecological niche, some seven hundred feet below sea level; it is geographically within the hilly-flanks zone but environmentally not part of it. -Several radiocarbon dates for Jericho fall within the range of those +Several radiocarbon �dates� for Jericho fall within the range of those I find reasonable for Jarmo, and their internal statistical consistency is far better than that for the Jarmo determinations. It is not yet clear exactly what this means. @@ -4226,7 +4226,7 @@ how things were made are different; the Sialk assemblage represents still another cultural pattern. I suspect it appeared a bit later in time than did that of Hassuna. There is an important new item in the Sialk catalogue. The Sialk people made small drills or pins of -hammered copper. Thus the metallurgists specialized craft had made its +hammered copper. Thus the metallurgist�s specialized craft had made its appearance. 
There is at least one very early Iranian site on the inward slopes @@ -4246,7 +4246,7 @@ shore of the Fayum lake. The Fayum materials come mainly from grain bins or silos. Another site, Merimde, in the western part of the Nile delta, shows the remains of a true village, but it may be slightly later than the settlement of the Fayum. There are radioactive carbon -dates for the Fayum materials at about 4275 B.C. 320 years, which +�dates� for the Fayum materials at about 4275 B.C. � 320 years, which is almost fifteen hundred years later than the determinations suggested for the Hassunan or Syro-Cilician assemblages. I suspect that this is a somewhat over-extended indication of the time it took for the @@ -4260,13 +4260,13 @@ the mound called Shaheinab. The Shaheinab catalogue roughly corresponds to that of the Fayum; the distance between the two places, as the Nile flows, is roughly 1,500 miles. Thus it took almost a thousand years for the new way of life to be carried as far south into Africa as Khartoum; -the two Shaheinab dates average about 3300 B.C. 400 years. +the two Shaheinab �dates� average about 3300 B.C. � 400 years. If the movement was up the Nile (southward), as these dates suggest, then I suspect that the earliest available village material of middle Egypt, the so-called Tasian, is also later than that of the Fayum. The Tasian materials come from a few graves near a village called Deir -Tasa, and I have an uncomfortable feeling that the Tasian assemblage +Tasa, and I have an uncomfortable feeling that the Tasian �assemblage� may be mainly an artificial selection of poor examples of objects which belong in the following range of time. @@ -4280,7 +4280,7 @@ spread outward in space from the nuclear area, as time went on. There is good archeological evidence that both these processes took place. 
For the hill country of northeastern Iraq, in the nuclear area, we have already noticed how the succession (still with gaps) from Karim -Shahir, through Mlefaat and Jarmo, to Hassuna can be charted (see +Shahir, through M�lefaat and Jarmo, to Hassuna can be charted (see chart, p. 111). In the next chapter, we shall continue this charting and description of what happened in Iraq upward through time. We also watched traces of the new way of life move through space up the Nile @@ -4299,7 +4299,7 @@ appearance of the village-farming community there--is still an open one. In the last chapter, we noted the probability of an independent nuclear area in southeastern Asia. Professor Carl Sauer strongly champions the great importance of this area as _the_ original center -of agricultural pursuits, as a kind of cradle of all incipient eras +of agricultural pursuits, as a kind of �cradle� of all incipient eras of the Old World at least. While there is certainly not the slightest archeological evidence to allow us to go that far, we may easily expect that an early southeast Asian development would have been felt in @@ -4311,13 +4311,13 @@ way of life moved well beyond Khartoum in Africa. THE SPREAD OF THE VILLAGE-FARMING COMMUNITY WAY OF LIFE INTO EUROPE -How about Europe? I wont give you many details. You can easily imagine +How about Europe? I won�t give you many details. You can easily imagine that the late prehistoric prelude to European history is a complicated affair. We all know very well how complicated an area Europe is now, with its welter of different languages and cultures. Remember, however, that a great deal of archeology has been done on the late prehistory of Europe, and very little on that of further Asia and Africa. If we knew -as much about these areas as we do of Europe, I expect wed find them +as much about these areas as we do of Europe, I expect we�d find them just as complicated. 
This much is clear for Europe, as far as the spread of the @@ -4329,21 +4329,21 @@ in western Asia. I do not, of course, mean that there were traveling salesmen who carried these ideas and things to Europe with a commercial gleam in their eyes. The process took time, and the ideas and things must have been passed on from one group of people to the next. There -was also some actual movement of peoples, but we dont know the size of +was also some actual movement of peoples, but we don�t know the size of the groups that moved. -The story of the colonization of Europe by the first farmers is +The story of the �colonization� of Europe by the first farmers is thus one of (1) the movement from the eastern Mediterranean lands of some people who were farmers; (2) the spread of ideas and things beyond the Near East itself and beyond the paths along which the -colonists moved; and (3) the adaptations of the ideas and things -by the indigenous Forest folk, about whose receptiveness Professor +�colonists� moved; and (3) the adaptations of the ideas and things +by the indigenous �Forest folk�, about whose �receptiveness� Professor Mathiassen speaks (p. 97). It is important to note that the resulting cultures in the new European environment were European, not Near -Eastern. The late Professor Childe remarked that the peoples of the +Eastern. The late Professor Childe remarked that �the peoples of the West were not slavish imitators; they adapted the gifts from the East ... into a new and organic whole capable of developing on its own -original lines. +original lines.� THE WAYS TO EUROPE @@ -4389,19 +4389,19 @@ Hill, the earliest known trace of village-farming communities in England, is about 2500 B.C. I would expect about 5500 B.C. to be a safe date to give for the well-developed early village communities of Syro-Cilicia. We suspect that the spread throughout Europe did not -proceed at an even rate. Professor Piggott writes that at a date +proceed at an even rate. 
Professor Piggott writes that "at a date
The general type of -construction is called megalithic (= great stone), and the whole +(�passage graves�), or to a simple long gallery, along the sides of +which the bodies were laid (�gallery graves�). The general type of +construction is called �megalithic� (= great stone), and the whole earth-mounded structure is often called a _barrow_. Since many have -proper chambers, in one sense or another, we used the term unchambered -barrow above to distinguish those of the Windmill Hill type from these +proper chambers, in one sense or another, we used the term �unchambered +barrow� above to distinguish those of the Windmill Hill type from these megalithic structures. There is some evidence for sacrifice, libations, and ceremonial fires, and it is clear that some form of community ritual was focused on the megalithic tombs. @@ -4466,7 +4466,7 @@ The third early British group of antiquities of this general time It is not so certain that the people who made this assemblage, called Peterborough, were actually farmers. While they may on occasion have practiced a simple agriculture, many items of their assemblage link -them closely with that of the Forest folk of earlier times in +them closely with that of the �Forest folk� of earlier times in England and in the Baltic countries. Their pottery is decorated with impressions of cords and is quite different from that of Windmill Hill and the megalithic builders. In addition, the distribution of their @@ -4479,7 +4479,7 @@ to acquire the raw material for stone axes. A probably slightly later culture, whose traces are best known from Skara Brae on Orkney, also had its roots in those cultures of the -Baltic area which fused out of the meeting of the Forest folk and +Baltic area which fused out of the meeting of the �Forest folk� and the peoples who took the eastern way into Europe. Skara Brae is very well preserved, having been built of thin stone slabs about which dune-sand drifted after the village died. 
The individual houses, the @@ -4498,14 +4498,14 @@ details which I have omitted in order to shorten the story. I believe some of the difficulty we have in understanding the establishment of the first farming communities in Europe is with -the word colonization. We have a natural tendency to think of -colonization as it has happened within the last few centuries. In the +the word �colonization.� We have a natural tendency to think of +�colonization� as it has happened within the last few centuries. In the case of the colonization of the Americas, for example, the colonists came relatively quickly, and in increasingly vast numbers. They had vastly superior technical, political, and war-making skills, compared with those of the Indians. There was not much mixing with the Indians. The case in Europe five or six thousand years ago must have been very -different. I wonder if it is even proper to call people colonists +different. I wonder if it is even proper to call people �colonists� who move some miles to a new region, settle down and farm it for some years, then move on again, generation after generation? The ideas and the things which these new people carried were only _potentially_ @@ -4521,12 +4521,12 @@ migrants were moving by boat, long distances may have been covered in a short time. Remember, however, we seem to have about three thousand years between the early Syro-Cilician villages and Windmill Hill. -Let me repeat Professor Childe again. The peoples of the West were +Let me repeat Professor Childe again. �The peoples of the West were not slavish imitators: they adapted the gifts from the East ... into a new and organic whole capable of developing on its own original -lines. Childe is of course completely conscious of the fact that his -peoples of the West were in part the descendants of migrants who came -originally from the East, bringing their gifts with them. 
This +lines.� Childe is of course completely conscious of the fact that his +�peoples of the West� were in part the descendants of migrants who came +originally from the �East,� bringing their �gifts� with them. This was the late prehistoric achievement of Europe--to take new ideas and things and some migrant peoples and, by mixing them with the old in its own environments, to forge a new and unique series of cultures. @@ -4553,14 +4553,14 @@ things first happened there and also because I know it best. There is another interesting thing, too. We have seen that the first experiment in village-farming took place in the Near East. So did -the first experiment in civilization. Both experiments took. The +the first experiment in civilization. Both experiments �took.� The traditions we live by today are based, ultimately, on those ancient beginnings in food-production and civilization in the Near East. -WHAT CIVILIZATION MEANS +WHAT �CIVILIZATION� MEANS -I shall not try to define civilization for you; rather, I shall +I shall not try to define �civilization� for you; rather, I shall tell you what the word brings to my mind. To me civilization means urbanization: the fact that there are cities. It means a formal political set-up--that there are kings or governing bodies that the @@ -4606,7 +4606,7 @@ of Mexico, the Mayas of Yucatan and Guatemala, and the Incas of the Andes were civilized. -WHY DIDNT CIVILIZATION COME TO ALL FOOD-PRODUCERS? +WHY DIDN�T CIVILIZATION COME TO ALL FOOD-PRODUCERS? Once you have food-production, even at the well-advanced level of the village-farming community, what else has to happen before you @@ -4625,13 +4625,13 @@ early civilization, is still an open and very interesting question. WHERE CIVILIZATION FIRST APPEARED IN THE NEAR EAST You remember that our earliest village-farming communities lay along -the hilly flanks of a great crescent. (See map on p. 125.) 
-Professor Breasteds fertile crescent emphasized the rich river +the hilly flanks of a great �crescent.� (See map on p. 125.) +Professor Breasted�s �fertile crescent� emphasized the rich river valleys of the Nile and the Tigris-Euphrates Rivers. Our hilly-flanks area of the crescent zone arches up from Egypt through Palestine and Syria, along southern Turkey into northern Iraq, and down along the southwestern fringe of Iran. The earliest food-producing villages we -know already existed in this area by about 6750 B.C. ( 200 years). +know already existed in this area by about 6750 B.C. (� 200 years). Now notice that this hilly-flanks zone does not include southern Mesopotamia, the alluvial land of the lower Tigris and Euphrates in @@ -4639,7 +4639,7 @@ Iraq, or the Nile Valley proper. The earliest known villages of classic Mesopotamia and Egypt seem to appear fifteen hundred or more years after those of the hilly-flanks zone. For example, the early Fayum village which lies near a lake west of the Nile Valley proper (see p. -135) has a radiocarbon date of 4275 B.C. 320 years. It was in the +135) has a radiocarbon date of 4275 B.C. � 320 years. It was in the river lands, however, that the immediate beginnings of civilization were made. @@ -4657,8 +4657,8 @@ THE HILLY-FLANKS ZONE VERSUS THE RIVER LANDS Why did these two civilizations spring up in these two river lands which apparently were not even part of the area where the -village-farming community began? Why didnt we have the first -civilizations in Palestine, Syria, north Iraq, or Iran, where were +village-farming community began? Why didn�t we have the first +civilizations in Palestine, Syria, north Iraq, or Iran, where we�re sure food-production had had a long time to develop? I think the probable answer gives a clue to the ways in which civilization began in Egypt and Mesopotamia. @@ -4669,7 +4669,7 @@ and Syria. 
There are pleasant mountain slopes, streams running out to the sea, and rain, at least in the winter months. The rain belt and the foothills of the Turkish mountains also extend to northern Iraq and on to the Iranian plateau. The Iranian plateau has its mountain valleys, -streams, and some rain. These hilly flanks of the crescent, through +streams, and some rain. These hilly flanks of the �crescent,� through most of its arc, are almost made-to-order for beginning farmers. The grassy slopes of the higher hills would be pasture for their herds and flocks. As soon as the earliest experiments with agriculture and @@ -4720,10 +4720,10 @@ Obviously, we can no longer find the first dikes or reservoirs of the Nile Valley, or the first canals or ditches of Mesopotamia. The same land has been lived on far too long for any traces of the first attempts to be left; or, especially in Egypt, it has been covered by -the yearly deposits of silt, dropped by the river floods. But were +the yearly deposits of silt, dropped by the river floods. But we�re pretty sure the first food-producers of Egypt and southern Mesopotamia must have made such dikes, canals, and ditches. In the first place, -there cant have been enough rain for them to grow things otherwise. +there can�t have been enough rain for them to grow things otherwise. In the second place, the patterns for such projects seem to have been pretty well set by historic times. @@ -4733,10 +4733,10 @@ CONTROL OF THE RIVERS THE BUSINESS OF EVERYONE Here, then, is a _part_ of the reason why civilization grew in Egypt and Mesopotamia first--not in Palestine, Syria, or Iran. In the latter areas, people could manage to produce their food as individuals. It -wasnt too hard; there were rain and some streams, and good pasturage +wasn�t too hard; there were rain and some streams, and good pasturage for the animals even if a crop or two went wrong. 
In Egypt and Mesopotamia, people had to put in a much greater amount of work, and -this work couldnt be individual work. Whole villages or groups of +this work couldn�t be individual work. Whole villages or groups of people had to turn out to fix dikes or dig ditches. The dikes had to be repaired and the ditches carefully cleared of silt each year, or they would become useless. @@ -4745,7 +4745,7 @@ There also had to be hard and fast rules. The person who lived nearest the ditch or the reservoir must not be allowed to take all the water and leave none for his neighbors. It was not only a business of learning to control the rivers and of making their waters do the -farmers work. It also meant controlling men. But once these men had +farmer�s work. It also meant controlling men. But once these men had managed both kinds of controls, what a wonderful yield they had! The soil was already fertile, and the silt which came in the floods and ditches kept adding fertile soil. @@ -4756,7 +4756,7 @@ THE GERM OF CIVILIZATION IN EGYPT AND MESOPOTAMIA This learning to work together for the common good was the real germ of the Egyptian and the Mesopotamian civilizations. The bare elements of civilization were already there: the need for a governing hand and for -laws to see that the communities work was done and that the water was +laws to see that the communities� work was done and that the water was justly shared. You may object that there is a sort of chicken and egg paradox in this idea. How could the people set up the rules until they had managed to get a way to live, and how could they manage to get a @@ -4781,12 +4781,12 @@ My explanation has been pointed particularly at Egypt and Mesopotamia. I have already told you that the irrigation and water-control part of it does not apply to the development of the Aztecs or the Mayas, or perhaps anybody else. But I think that a fair part of the story of -Egypt and Mesopotamia must be as Ive just told you. 
Egypt and Mesopotamia must be as I’ve just told you.
There is painted Ubaidian pottery, but the style is hurried and somewhat careless and gives the _impression_ of having been a cheap mass-production means of decoration when compared with the @@ -4879,7 +4879,7 @@ turtle-like faces are another item in the southern Ubaidian assemblage. There is a large Ubaid cemetery at Eridu, much of it still awaiting excavation. The few skeletons so far tentatively studied reveal a -completely modern type of Mediterraneanoid; the individuals whom the +completely modern type of �Mediterraneanoid�; the individuals whom the skeletons represent would undoubtedly blend perfectly into the modern population of southern Iraq. What the Ubaidian assemblage says to us is that these people had already adapted themselves and their culture to @@ -4925,7 +4925,7 @@ woven stuffs must have been the mediums of exchange. Over what area did the trading net-work of Ubaid extend? We start with the idea that the Ubaidian assemblage is most richly developed in the south. We assume, I think, correctly, that it represents a cultural flowering of the south. -On the basis of the pottery of the still elusive Eridu immigrants +On the basis of the pottery of the still elusive �Eridu� immigrants who had first followed the rivers into alluvial Mesopotamia, we get the notion that the characteristic painted pottery style of Ubaid was developed in the southland. If this reconstruction is correct @@ -4935,7 +4935,7 @@ assemblage of (and from the southern point of view, _fairly_ pure) Ubaidian material in northern Iraq. The pottery appears all along the Iranian flanks, even well east of the head of the Persian Gulf, and ends in a later and spectacular flourish in an extremely handsome -painted style called the Susa style. Ubaidian pottery has been noted +painted style called the �Susa� style. Ubaidian pottery has been noted up the valleys of both of the great rivers, well north of the Iraqi and Syrian borders on the southern flanks of the Anatolian plateau. 
It reaches the Mediterranean Sea and the valley of the Orontes in @@ -4965,10 +4965,10 @@ Mesopotamia. Next, much to our annoyance, we have what is almost a temporary black-out. According to the system of terminology I favor, our next -assemblage after that of Ubaid is called the _Warka_ phase, from +�assemblage� after that of Ubaid is called the _Warka_ phase, from the Arabic name for the site of Uruk or Erich. We know it only from six or seven levels in a narrow test-pit at Warka, and from an even -smaller hole at another site. This assemblage, so far, is known only +smaller hole at another site. This �assemblage,� so far, is known only by its pottery, some of which still bears Ubaidian style painting. The characteristic Warkan pottery is unpainted, with smoothed red or gray surfaces and peculiar shapes. Unquestionably, there must be a great @@ -4979,7 +4979,7 @@ have to excavate it! THE DAWN OF CIVILIZATION After our exasperation with the almost unknown Warka interlude, -following the brilliant false dawn of Ubaid, we move next to an +following the brilliant �false dawn� of Ubaid, we move next to an assemblage which yields traces of a preponderance of those elements which we noted (p. 144) as meaning civilization. This assemblage is that called _Proto-Literate_; it already contains writing. On @@ -4988,8 +4988,8 @@ history--and no longer prehistory--the assemblage is named for the historical implications of its content, and no longer after the name of the site where it was first found. Since some of the older books used site-names for this assemblage, I will tell you that the Proto-Literate -includes the latter half of what used to be called the Uruk period -_plus_ all of what used to be called the Jemdet Nasr period. It shows +includes the latter half of what used to be called the �Uruk period� +_plus_ all of what used to be called the �Jemdet Nasr period.� It shows a consistent development from beginning to end. 
I shall, in fact, leave much of the description and the historic @@ -5033,18 +5033,18 @@ mental block seems to have been removed. Clay tablets bearing pictographic signs are the Proto-Literate forerunners of cuneiform writing. The earliest examples are not well -understood but they seem to be devices for making accounts and -for remembering accounts. Different from the later case in Egypt, +understood but they seem to be �devices for making accounts and +for remembering accounts.� Different from the later case in Egypt, where writing appears fully formed in the earliest examples, the development from simple pictographic signs to proper cuneiform writing may be traced, step by step, in Mesopotamia. It is most probable that the development of writing was connected with the temple and -the need for keeping account of the temples possessions. Professor +the need for keeping account of the temple�s possessions. Professor Jacobsen sees writing as a means for overcoming space, time, and the -increasing complications of human affairs: Literacy, which began +increasing complications of human affairs: �Literacy, which began with ... civilization, enhanced mightily those very tendencies in its development which characterize it as a civilization and mark it off as -such from other types of culture. +such from other types of culture.� [Illustration: RELIEF ON A PROTO-LITERATE STONE VASE, WARKA @@ -5098,7 +5098,7 @@ civilized way of life. I suppose you could say that the difference in the approach is that as a prehistorian I have been looking forward or upward in time, while the -historians look backward to glimpse what Ive been describing here. My +historians look backward to glimpse what I�ve been describing here. My base-line was half a million years ago with a being who had little more than the capacity to make tools and fire to distinguish him from the animals about him. 
Thus my point of view and that of the conventional @@ -5114,17 +5114,17 @@ End of PREHISTORY [Illustration] -Youll doubtless easily recall your general course in ancient history: +You�ll doubtless easily recall your general course in ancient history: how the Sumerian dynasties of Mesopotamia were supplanted by those of Babylonia, how the Hittite kingdom appeared in Anatolian Turkey, and about the three great phases of Egyptian history. The literate kingdom of Crete arose, and by 1500 B.C. there were splendid fortified Mycenean towns on the mainland of Greece. This was the time--about the whole eastern end of the Mediterranean--of what Professor Breasted called the -first great internationalism, with flourishing trade, international +�first great internationalism,� with flourishing trade, international treaties, and royal marriages between Egyptians, Babylonians, and -Hittites. By 1200 B.C., the whole thing had fragmented: the peoples of -the sea were restless in their isles, and the great ancient centers in +Hittites. By 1200 B.C., the whole thing had fragmented: �the peoples of +the sea were restless in their isles,� and the great ancient centers in Egypt, Mesopotamia, and Anatolia were eclipsed. Numerous smaller states arose--Assyria, Phoenicia, Israel--and the Trojan war was fought. Finally Assyria became the paramount power of all the Near East, @@ -5135,7 +5135,7 @@ but casting them with its own tradition into a new mould, arose in mainland Greece. I once shocked my Classical colleagues to the core by referring to -Greece as a second degree derived civilization, but there is much +Greece as �a second degree derived civilization,� but there is much truth in this. The principles of bronze- and then of iron-working, of the alphabet, and of many other elements in Greek culture were borrowed from western Asia. Our debt to the Greeks is too well known for me even @@ -5146,7 +5146,7 @@ Greece fell in its turn to Rome, and in 55 B.C. Caesar invaded Britain. 
I last spoke of Britain on page 142; I had chosen it as my single example for telling you something of how the earliest farming communities were established in Europe. Now I will continue with -Britains later prehistory, so you may sense something of the end of +Britain�s later prehistory, so you may sense something of the end of prehistory itself. Remember that Britain is simply a single example we select; the same thing could be done for all the other countries of Europe, and will be possible also, some day, for further Asia and @@ -5186,20 +5186,20 @@ few Battle-axe folk elements, including, in fact, stone battle-axes, reached England with the earliest Beaker folk,[6] coming from the Rhineland. - [6] The British authors use the term Beaker folk to mean both + [6] The British authors use the term �Beaker folk� to mean both archeological assemblage and human physical type. They speak - of a ... tall, heavy-boned, rugged, and round-headed strain + of a �... tall, heavy-boned, rugged, and round-headed� strain which they take to have developed, apparently in the Rhineland, by a mixture of the original (Spanish?) beaker-makers and the northeast European battle-axe makers. However, since the science of physical anthropology is very much in flux at the moment, and since I am not able to assess the evidence for these - physical types, I _do not_ use the term folk in this book with + physical types, I _do not_ use the term �folk� in this book with its usual meaning of standardized physical type. When I use - folk here, I mean simply _the makers of a given archeological + �folk� here, I mean simply _the makers of a given archeological assemblage_. The difficulty only comes when assemblages are named for some item in them; it is too clumsy to make an - adjective of the item and refer to a beakerian assemblage. + adjective of the item and refer to a �beakerian� assemblage. The Beaker folk settled earliest in the agriculturally fertile south and east. 
There seem to have been several phases of Beaker folk @@ -5211,7 +5211,7 @@ folk are known. They buried their dead singly, sometimes in conspicuous individual barrows with the dead warrior in his full trappings. The spectacular element in the assemblage of the Beaker folk is a group of large circular monuments with ditches and with uprights of wood or -stone. These henges became truly monumental several hundred years +stone. These �henges� became truly monumental several hundred years later; while they were occasionally dedicated with a burial, they were not primarily tombs. The effect of the invasion of the Beaker folk seems to cut across the whole fabric of life in Britain. @@ -5221,7 +5221,7 @@ seems to cut across the whole fabric of life in Britain. There was, however, a second major element in British life at this time. It shows itself in the less well understood traces of a group again called after one of the items in their catalogue, the Food-vessel -folk. There are many burials in these food-vessel pots in northern +folk. There are many burials in these �food-vessel� pots in northern England, Scotland, and Ireland, and the pottery itself seems to link back to that of the Peterborough assemblage. Like the earlier Peterborough people in the highland zone before them, the makers of @@ -5238,8 +5238,8 @@ MORE INVASIONS About 1500 B.C., the situation became further complicated by the arrival of new people in the region of southern England anciently called Wessex. The traces suggest the Brittany coast of France as a -source, and the people seem at first to have been a small but heroic -group of aristocrats. Their heroes are buried with wealth and +source, and the people seem at first to have been a small but �heroic� +group of aristocrats. Their �heroes� are buried with wealth and ceremony, surrounded by their axes and daggers of bronze, their gold ornaments, and amber and jet beads. 
These rich finds show that the trade-linkage these warriors patronized spread from the Baltic sources @@ -5265,10 +5265,10 @@ which must have been necessary before such a great monument could have been built. -THIS ENGLAND +�THIS ENGLAND� The range from 1900 to about 1400 B.C. includes the time of development -of the archeological features usually called the Early Bronze Age +of the archeological features usually called the �Early Bronze Age� in Britain. In fact, traces of the Wessex warriors persisted down to about 1200 B.C. The main regions of the island were populated, and the adjustments to the highland and lowland zones were distinct and well @@ -5279,7 +5279,7 @@ trading role, separated from the European continent but conveniently adjacent to it. The tin of Cornwall--so important in the production of good bronze--as well as the copper of the west and of Ireland, taken with the gold of Ireland and the general excellence of Irish -metal work, assured Britain a traders place in the then known world. +metal work, assured Britain a trader�s place in the then known world. Contacts with the eastern Mediterranean may have been by sea, with Cornish tin as the attraction, or may have been made by the Food-vessel middlemen on their trips to the Baltic coast. There they would have @@ -5292,9 +5292,9 @@ relative isolation gave some peace and also gave time for a leveling and further fusion of culture. The separate cultural traditions began to have more in common. The growing of barley, the herding of sheep and cattle, and the production of woolen garments were already features -common to all Britains inhabitants save a few in the remote highlands, +common to all Britain�s inhabitants save a few in the remote highlands, the far north, and the distant islands not yet fully touched by -food-production. The personality of Britain was being formed. +food-production. The �personality of Britain� was being formed. 
CREMATION BURIALS BEGIN @@ -5325,9 +5325,9 @@ which we shall mention below. The British cremation-burial-in-urns folk survived a long time in the highland zone. In the general British scheme, they make up what is -called the Middle Bronze Age, but in the highland zone they last +called the �Middle Bronze Age,� but in the highland zone they last until after 900 B.C. and are considered to be a specialized highland -Late Bronze Age. In the highland zone, these later cremation-burial +�Late Bronze Age.� In the highland zone, these later cremation-burial folk seem to have continued the older Food-vessel tradition of being middlemen in the metal market. @@ -5379,12 +5379,12 @@ to get a picture of estate or tribal boundaries which included village communities; we find a variety of tools in bronze, and even whetstones which show that iron has been honed on them (although the scarce iron has not been found). Let me give you the picture in Professor S. -Piggotts words: The ... Late Bronze Age of southern England was but +Piggott�s words: �The ... Late Bronze Age of southern England was but the forerunner of the earliest Iron Age in the same region, not only in the techniques of agriculture, but almost certainly in terms of ethnic kinship ... we can with some assurance talk of the Celts ... the great early Celtic expansion of the Continent is recognized to be that of the -Urnfield people. +Urnfield people.� Thus, certainly by 500 B.C., there were people in Britain, some of whose descendants we may recognize today in name or language in remote @@ -5399,11 +5399,11 @@ efficient set of tools than does bronze. Iron tools seem first to have been made in quantity in Hittite Anatolia about 1500 B.C. In continental Europe, the earliest, so-called Hallstatt, iron-using cultures appeared in Germany soon after 750 B.C. 
Somewhat later,
-Greek and especially Etruscan exports of _objets dart_--which moved
+Greek and especially Etruscan exports of _objets d’art_--which moved
with a flourishing trans-Alpine wine trade--influenced the Hallstatt
iron-working tradition. Still later new classical motifs, together with
older Hallstatt, oriental, and northern nomad motifs, gave rise to a
-new style in metal decoration which characterizes the so-called La Tne
+new style in metal decoration which characterizes the so-called La Tène
phase.

A few iron users reached Britain a little before 400 B.C. Not long
@@ -5422,7 +5422,7 @@ HILL-FORTS AND FARMS

The earliest iron-users seem to have entrenched themselves temporarily
within hill-top forts, mainly in the south. Gradually, they moved
inland, establishing _individual_ farm sites with extensive systems
-of rectangular fields. We recognize these fields by the lynchets or
+of rectangular fields. We recognize these fields by the “lynchets” or
lines of soil-creep which plowing left on the slopes of hills. New
crops appeared; there were now bread wheat, oats, and rye, as well as
barley.
@@ -5434,7 +5434,7 @@ various outbuildings and pits for the storage of grain. Weaving was
done on the farm, but not blacksmithing, which must have been a
specialized trade. Save for the lack of firearms, the place might
almost be taken for a farmstead on the American frontier in the early
-1800s.
+1800’s.

Toward 250 B.C. there seems to have been a hasty attempt to repair the
hill-forts and to build new ones, evidently in response to signs of
@@ -5446,9 +5446,9 @@ THE SECOND PHASE

Perhaps the hill-forts were not entirely effective or perhaps a
compromise was reached. In any case, the newcomers from the Marne
district did establish themselves, first in the southeast and then to
-the north and west. They brought iron with decoration of the La Tne
+the north and west. They brought iron with decoration of the La Tène
type and also the two-wheeled chariot.
Like the Wessex warriors of
-over a thousand years earlier, they made heroes graves, with their
+over a thousand years earlier, they made “heroes’” graves, with their
warriors buried in the war-chariots and dressed in full trappings.

[Illustration: CELTIC BUCKLE]

@@ -5457,7 +5457,7 @@ The metal work of these Marnian newcomers is excellent. The peculiar
Celtic art style, based originally on the classic tendril motif, is
colorful and virile, and fits with Greek and Roman descriptions of
Celtic love of color in dress. There is a strong trace of these
-newcomers northward in Yorkshire, linked by Ptolemys description to
+newcomers northward in Yorkshire, linked by Ptolemy’s description to
the Parisii, doubtless part of the Celtic tribe which originally gave
its name to Paris on the Seine. Near Glastonbury, in Somerset, two
villages in swamps have been excavated. They seem to date toward the
@@ -5469,7 +5469,7 @@ villagers.

In Scotland, which yields its first iron tools at a date of about 100
B.C., and in northern Ireland even slightly earlier, the effects of the
-two phases of newcomers tend especially to blend. Hill-forts, brochs
+two phases of newcomers tend especially to blend. Hill-forts, “brochs”
(stone-built round towers) and a variety of other strange structures
seem to appear as the new ideas develop in the comparative isolation of
northern Britain.
@@ -5493,27 +5493,27 @@ at last, we can even begin to speak of dynasties and individuals.

Some time before 55 B.C., the Catuvellauni, originally from the Marne
district in France, had possessed themselves of a large part of
southeastern England. They evidently sailed up the Thames and built a
-town of over a hundred acres in area. Here ruled Cassivellaunus, the
-first man in England whose name we know, and whose town Caesar sacked.
+town of over a hundred acres in area. Here ruled Cassivellaunus, “the
+first man in England whose name we know,” and whose town Caesar sacked.
The town sprang up elsewhere again, however.
THE END OF PREHISTORY

Prehistory, strictly speaking, is now over in southern Britain.
-Claudius effective invasion took place in 43 A.D.; by 83 A.D., a raid
+Claudius’ effective invasion took place in 43 A.D.; by 83 A.D., a raid
had been made as far north as Aberdeen in Scotland. But by 127 A.D.,
Hadrian had completed his wall from the Solway to the Tyne, and the
Romans settled behind it. In Scotland, Romanization can have affected
-the countryside very little. Professor Piggott adds that ... it is
+the countryside very little. Professor Piggott adds that “... it is
when the pressure of Romanization is relaxed by the break-up of the
Dark Ages that we see again the Celtic metal-smiths handling their
material with the same consummate skill as they had before the Roman
Conquest, and with traditional styles that had not even then forgotten
-their Marnian and Belgic heritage.
+their Marnian and Belgic heritage.”

In fact, many centuries go by, in Britain as well as in the rest of
-Europe, before the archeologists task is complete and the historian on
+Europe, before the archeologist’s task is complete and the historian on
his own is able to describe the ways of men in the past.


@@ -5524,7 +5524,7 @@ you will have noticed how often I had to refer to the European
continent itself. Britain, beyond the English Channel for all of her
later prehistory, had a much simpler course of events than did most
of the rest of Europe in later prehistoric times. This holds, in spite
-of all the invasions and reverberations from the continent. Most
+of all the “invasions” and “reverberations” from the continent. Most
of Europe was the scene of an even more complicated ebb and flow of
cultural change, save in some of its more remote mountain valleys and
peninsulas.
@@ -5536,7 +5536,7 @@ accounts and some good general accounts of part of the range from
about 3000 B.C. to A.D. 1.
I suspect that the difficulty of making a
good book that covers all of its later prehistory is another aspect
of what makes Europe so very complicated a continent today. The prehistoric
-foundations for Europes very complicated set of civilizations,
+foundations for Europe’s very complicated set of civilizations,
cultures, and sub-cultures--which begin to appear as history
proceeds--were in themselves very complicated.

@@ -5552,8 +5552,8 @@ of their journeys. But by the same token, they had had time en route to
take on their characteristic European aspects.

Some time ago, Sir Cyril Fox wrote a famous book called _The
-Personality of Britain_, sub-titled Its Influence on Inhabitant and
-Invader in Prehistoric and Early Historic Times. We have not gone
+Personality of Britain_, sub-titled “Its Influence on Inhabitant and
+Invader in Prehistoric and Early Historic Times.” We have not gone
into the post-Roman early historic period here; there are still the
Anglo-Saxons and Normans to account for as well as the effects of the
Romans. But what I have tried to do was to begin the story of
@@ -5570,7 +5570,7 @@ Summary


In the pages you have read so far, you have been brought through the
-earliest 99 per cent of the story of mans life on this planet. I have
+earliest 99 per cent of the story of man’s life on this planet. I have
left only 1 per cent of the story for the historians to tell.


@@ -5601,7 +5601,7 @@ But I think there may have been a few. Certainly the pace of the first
act accelerated with the swing from simple gathering to more intensified
collecting. The great cave art of France and Spain was
probably an expression of a climax. Even the ideas of burying the dead
-and of the Venus figurines must also point to levels of human thought
+and of the “Venus” figurines must also point to levels of human thought
and activity that were over and above pure food-getting.


@@ -5629,7 +5629,7 @@ five thousand years after the second act began.
But it could never have happened in the first act at all.

There is another curious thing about the first act. Many of the players
-didnt know it was over and they kept on with their roles long after
+didn’t know it was over and they kept on with their roles long after
the second act had begun. On the edges of the stage there are today
some players who are still going on with the first act. The Eskimos,
and the native Australians, and certain tribes in the Amazon jungle are
@@ -5680,20 +5680,20 @@ act may have lessons for us and give depth to our thinking. I know
there are at least _some_ lessons, even in the present incomplete
state of our knowledge. The players who began the second act--that of
food-production--separately, in different parts of the world, were not
-all of one pure race nor did they have pure cultural traditions.
+all of one “pure race” nor did they have “pure” cultural traditions.
Some apparently quite mixed Mediterraneans got off to the first start
on the second act and brought it to its first two climaxes as well.
Peoples of quite different physical type achieved the first climaxes in
China and in the New World.

In our British example of how the late prehistory of Europe worked, we
-listed a continuous series of invasions and reverberations. After
+listed a continuous series of “invasions” and “reverberations.” After
each of these came fusion. Even though the Channel protected Britain
from some of the extreme complications of the mixture and fusion of
continental Europe, you can see how silly it would be to refer to a
-pure British race or a pure British culture. We speak of the United
-States as a melting pot. But this is nothing new. Actually, Britain
-and all the rest of the world have been melting pots at one time or
+“pure” British race or a “pure” British culture. We speak of the United
+States as a “melting pot.” But this is nothing new. Actually, Britain
+and all the rest of the world have been “melting pots” at one time or
another.
By the time the written records of Mesopotamia and Egypt begin to turn @@ -5703,12 +5703,12 @@ itself, we are thrown back on prehistoric archeology. And this is as true for China, India, Middle America, and the Andes, as it is for the Near East. -There are lessons to be learned from all of mans past, not simply +There are lessons to be learned from all of man�s past, not simply lessons of how to fight battles or win peace conferences, but of how human society evolves from one stage to another. Many of these lessons can only be looked for in the prehistoric past. So far, we have only made a beginning. There is much still to do, and many gaps in the story -are yet to be filled. The prehistorians job is to find the evidence, +are yet to be filled. The prehistorian�s job is to find the evidence, to fill the gaps, and to discover the lessons men have learned in the past. As I see it, this is not only an exciting but a very practical goal for which to strive. @@ -5745,7 +5745,7 @@ paperbound books.) GEOCHRONOLOGY AND THE ICE AGE -(Two general books. Some Pleistocene geologists disagree with Zeuners +(Two general books. Some Pleistocene geologists disagree with Zeuner�s interpretation of the dating evidence, but their points of view appear in professional journals, in articles too cumbersome to list here.) @@ -5815,7 +5815,7 @@ GENERAL PREHISTORY Press. Movius, Hallam L., Jr. - Old World Prehistory: Paleolithic in _Anthropology Today_. + �Old World Prehistory: Paleolithic� in _Anthropology Today_. Kroeber, A. L., ed. 1953. University of Chicago Press. Oakley, Kenneth P. @@ -5826,7 +5826,7 @@ GENERAL PREHISTORY _British Prehistory._ 1949. Oxford University Press. Pittioni, Richard - _Die Urgeschichtlichen Grundlagen der Europischen Kultur._ + _Die Urgeschichtlichen Grundlagen der Europ�ischen Kultur._ 1949. Deuticke. (A single book which does attempt to cover the whole range of European prehistory to ca. 1 A.D.) 
@@ -5834,7 +5834,7 @@ GENERAL PREHISTORY THE NEAR EAST Adams, Robert M. - Developmental Stages in Ancient Mesopotamia, _in_ Steward, + �Developmental Stages in Ancient Mesopotamia,� _in_ Steward, Julian, _et al_, _Irrigation Civilizations: A Comparative Study_. 1955. Pan American Union. @@ -6000,7 +6000,7 @@ Index Bolas, 54 - Bordes, Franois, 62 + Bordes, Fran�ois, 62 Borer, 77 @@ -6028,7 +6028,7 @@ Index killed by stampede, 86 Burials, 66, 86; - in henges, 164; + in �henges,� 164; in urns, 168 Burins, 75 @@ -6085,7 +6085,7 @@ Index Combe Capelle, 30 - Combe Capelle-Brnn group, 34 + Combe Capelle-Br�nn group, 34 Commont, Victor, 51 @@ -6097,7 +6097,7 @@ Index Corrals for cattle, 140 - Cradle of mankind, 136 + �Cradle of mankind,� 136 Cremation, 167 @@ -6123,7 +6123,7 @@ Index Domestication, of animals, 100, 105, 107; of plants, 100 - Dragon teeth fossils in China, 28 + �Dragon teeth� fossils in China, 28 Drill, 77 @@ -6176,9 +6176,9 @@ Index Fayum, 135; radiocarbon date, 146 - Fertile Crescent, 107, 146 + �Fertile Crescent,� 107, 146 - Figurines, Venus, 84; + Figurines, �Venus,� 84; at Jarmo, 128; at Ubaid, 153 @@ -6197,7 +6197,7 @@ Index Flint industry, 127 - Fontchevade, 32, 56, 58 + Font�chevade, 32, 56, 58 Food-collecting, 104, 121; end of, 104 @@ -6223,7 +6223,7 @@ Index Food-vessel folk, 164 - Forest folk, 97, 98, 104, 110 + �Forest folk,� 97, 98, 104, 110 Fox, Sir Cyril, 174 @@ -6379,7 +6379,7 @@ Index Land bridges in Mediterranean, 19 - La Tne phase, 170 + La T�ne phase, 170 Laurel leaf point, 78, 89 @@ -6404,7 +6404,7 @@ Index Mammoth, 93; in cave art, 85 - Man-apes, 26 + �Man-apes,� 26 Mango, 107 @@ -6435,7 +6435,7 @@ Index Microliths, 87; at Jarmo, 130; - lunates, 87; + �lunates,� 87; trapezoids, 87; triangles, 87 @@ -6443,7 +6443,7 @@ Index Mine-shafts, 140 - Mlefaat, 126, 127 + M�lefaat, 126, 127 Mongoloids, 29, 90 @@ -6453,9 +6453,9 @@ Index Mount Carmel, 11, 33, 52, 59, 64, 69, 113, 114 - Mousterian man, 64 + �Mousterian man,� 64 - Mousterian tools, 
61, 62; + �Mousterian� tools, 61, 62; of Acheulean tradition, 62 Movius, H. L., 47 @@ -6471,7 +6471,7 @@ Index Near East, beginnings of civilization in, 20, 144; cave sites, 58; climate in Ice Age, 99; - Fertile Crescent, 107, 146; + �Fertile Crescent,� 107, 146; food-production in, 99; Natufian assemblage in, 113-115; stone tools, 114 @@ -6539,7 +6539,7 @@ Index Pig, wild, 108 - Piltdown man, 29 + �Piltdown man,� 29 Pins, 80 @@ -6578,7 +6578,7 @@ Index Race, 35; biological, 36; - pure, 16 + �pure,� 16 Radioactivity, 9, 10 @@ -6795,7 +6795,7 @@ Index Writing, 158; cuneiform, 158 - Wrm I glaciation, 58 + W�rm I glaciation, 58 Zebu cattle, domestication of, 107 @@ -6810,7 +6810,7 @@ Index -Transcribers note: +Transcriber�s note: Punctuation, hyphenation, and spelling were made consistent when a predominant preference was found in this book; otherwise they were not diff --git a/ciphers/transposition_cipher_encrypt_decrypt_file.py b/ciphers/transposition_cipher_encrypt_decrypt_file.py index 6296b1e6d..b9630243d 100644 --- a/ciphers/transposition_cipher_encrypt_decrypt_file.py +++ b/ciphers/transposition_cipher_encrypt_decrypt_file.py @@ -6,8 +6,8 @@ from . import transposition_cipher as trans_cipher def main() -> None: - input_file = "Prehistoric Men.txt" - output_file = "Output.txt" + input_file = "./prehistoric_men.txt" + output_file = "./Output.txt" key = int(input("Enter key: ")) mode = input("Encrypt/Decrypt [e/d]: ") From 24dbdd0b88bdfd4ddb940cf0b681075c66842cc3 Mon Sep 17 00:00:00 2001 From: Raghav <83136390+Raghav-Bell@users.noreply.github.com> Date: Wed, 4 Oct 2023 11:38:13 +0530 Subject: [PATCH 007/306] Update coulombs_law.py docs (#9667) * Update coulombs_law.py distance is positive non zero real number (float type) hence corrected docs which says only integer. 
* Update physics/coulombs_law.py --------- Co-authored-by: Tianyi Zheng --- physics/coulombs_law.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/physics/coulombs_law.py b/physics/coulombs_law.py index 252e8ec0f..fe2d358f6 100644 --- a/physics/coulombs_law.py +++ b/physics/coulombs_law.py @@ -32,7 +32,7 @@ def coulombs_law(q1: float, q2: float, radius: float) -> float: 17975103584.6 """ if radius <= 0: - raise ValueError("The radius is always a positive non zero integer") + raise ValueError("The radius is always a positive number") return round(((8.9875517923 * 10**9) * q1 * q2) / (radius**2), 2) From 3fd3497f15982a7286326b520b5e7b52767da1f3 Mon Sep 17 00:00:00 2001 From: Siddhant Totade Date: Wed, 4 Oct 2023 14:55:26 +0530 Subject: [PATCH 008/306] Add Comments (#9668) * docs : add comment in circular_linked_list.py and swap_nodes.py * docs : improve comments * docs : improved docs and tested on pre-commit * docs : add comment in circular_linked_list.py and swap_nodes.py * docs : improve comments * docs : improved docs and tested on pre-commit * docs : modified comments * Update circular_linked_list.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * docs : improved * Update data_structures/linked_list/circular_linked_list.py Co-authored-by: Christian Clauss * Update data_structures/linked_list/circular_linked_list.py Co-authored-by: Christian Clauss * Update data_structures/linked_list/swap_nodes.py Co-authored-by: Christian Clauss * Update data_structures/linked_list/swap_nodes.py Co-authored-by: Christian Clauss * Update data_structures/linked_list/swap_nodes.py Co-authored-by: Christian Clauss * Update data_structures/linked_list/swap_nodes.py Co-authored-by: Christian Clauss * Update requirements.txt Co-authored-by: Christian Clauss * Update data_structures/linked_list/circular_linked_list.py Co-authored-by: Christian Clauss * Apply suggestions from code review * [pre-commit.ci] auto 
fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update circular_linked_list.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../linked_list/circular_linked_list.py | 87 ++++++++++++++++--- data_structures/linked_list/swap_nodes.py | 47 ++++++++-- 2 files changed, 113 insertions(+), 21 deletions(-) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index d9544f426..72212f46b 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -6,16 +6,29 @@ from typing import Any class Node: def __init__(self, data: Any): + """ + Initialize a new Node with the given data. + Args: + data: The data to be stored in the node. + """ self.data: Any = data - self.next: Node | None = None + self.next: Node | None = None # Reference to the next node class CircularLinkedList: - def __init__(self): - self.head = None - self.tail = None + def __init__(self) -> None: + """ + Initialize an empty Circular Linked List. + """ + self.head = None # Reference to the head (first node) + self.tail = None # Reference to the tail (last node) def __iter__(self) -> Iterator[Any]: + """ + Iterate through all nodes in the Circular Linked List yielding their data. + Yields: + The data of each node in the linked list. + """ node = self.head while self.head: yield node.data @@ -24,25 +37,48 @@ class CircularLinkedList: break def __len__(self) -> int: + """ + Get the length (number of nodes) in the Circular Linked List. + """ return sum(1 for _ in self) - def __repr__(self): + def __repr__(self) -> str: + """ + Generate a string representation of the Circular Linked List. + Returns: + A string of the format "1->2->....->N". 
+ """ return "->".join(str(item) for item in iter(self)) def insert_tail(self, data: Any) -> None: + """ + Insert a node with the given data at the end of the Circular Linked List. + """ self.insert_nth(len(self), data) def insert_head(self, data: Any) -> None: + """ + Insert a node with the given data at the beginning of the Circular Linked List. + """ self.insert_nth(0, data) def insert_nth(self, index: int, data: Any) -> None: + """ + Insert the data of the node at the nth pos in the Circular Linked List. + Args: + index: The index at which the data should be inserted. + data: The data to be inserted. + + Raises: + IndexError: If the index is out of range. + """ if index < 0 or index > len(self): raise IndexError("list index out of range.") new_node = Node(data) if self.head is None: - new_node.next = new_node # first node points itself + new_node.next = new_node # First node points to itself self.tail = self.head = new_node - elif index == 0: # insert at head + elif index == 0: # Insert at the head new_node.next = self.head self.head = self.tail.next = new_node else: @@ -51,22 +87,43 @@ class CircularLinkedList: temp = temp.next new_node.next = temp.next temp.next = new_node - if index == len(self) - 1: # insert at tail + if index == len(self) - 1: # Insert at the tail self.tail = new_node - def delete_front(self): + def delete_front(self) -> Any: + """ + Delete and return the data of the node at the front of the Circular Linked List. + Raises: + IndexError: If the list is empty. + """ return self.delete_nth(0) def delete_tail(self) -> Any: + """ + Delete and return the data of the node at the end of the Circular Linked List. + Returns: + Any: The data of the deleted node. + Raises: + IndexError: If the index is out of range. + """ return self.delete_nth(len(self) - 1) def delete_nth(self, index: int = 0) -> Any: + """ + Delete and return the data of the node at the nth pos in Circular Linked List. + Args: + index (int): The index of the node to be deleted. 
Defaults to 0. + Returns: + Any: The data of the deleted node. + Raises: + IndexError: If the index is out of range. + """ if not 0 <= index < len(self): raise IndexError("list index out of range.") delete_node = self.head - if self.head == self.tail: # just one node + if self.head == self.tail: # Just one node self.head = self.tail = None - elif index == 0: # delete head node + elif index == 0: # Delete head node self.tail.next = self.tail.next.next self.head = self.head.next else: @@ -75,16 +132,22 @@ class CircularLinkedList: temp = temp.next delete_node = temp.next temp.next = temp.next.next - if index == len(self) - 1: # delete at tail + if index == len(self) - 1: # Delete at tail self.tail = temp return delete_node.data def is_empty(self) -> bool: + """ + Check if the Circular Linked List is empty. + Returns: + bool: True if the list is empty, False otherwise. + """ return len(self) == 0 def test_circular_linked_list() -> None: """ + Test cases for the CircularLinkedList class. >>> test_circular_linked_list() """ circular_linked_list = CircularLinkedList() diff --git a/data_structures/linked_list/swap_nodes.py b/data_structures/linked_list/swap_nodes.py index 3f825756b..da6aa07a7 100644 --- a/data_structures/linked_list/swap_nodes.py +++ b/data_structures/linked_list/swap_nodes.py @@ -2,30 +2,56 @@ from typing import Any class Node: - def __init__(self, data: Any): + def __init__(self, data: Any) -> None: + """ + Initialize a new Node with the given data. + + Args: + data: The data to be stored in the node. + + """ self.data = data - self.next = None + self.next = None # Reference to the next node class LinkedList: - def __init__(self): - self.head = None + def __init__(self) -> None: + """ + Initialize an empty Linked List. + """ + self.head = None # Reference to the head (first node) def print_list(self): + """ + Print the elements of the Linked List in order. 
+ """ temp = self.head while temp is not None: print(temp.data, end=" ") temp = temp.next print() - # adding nodes - def push(self, new_data: Any): + def push(self, new_data: Any) -> None: + """ + Add a new node with the given data to the beginning of the Linked List. + Args: + new_data (Any): The data to be added to the new node. + """ new_node = Node(new_data) new_node.next = self.head self.head = new_node - # swapping nodes - def swap_nodes(self, node_data_1, node_data_2): + def swap_nodes(self, node_data_1, node_data_2) -> None: + """ + Swap the positions of two nodes in the Linked List based on their data values. + Args: + node_data_1: Data value of the first node to be swapped. + node_data_2: Data value of the second node to be swapped. + + + Note: + If either of the specified data values isn't found then, no swapping occurs. + """ if node_data_1 == node_data_2: return else: @@ -40,6 +66,7 @@ class LinkedList: if node_1 is None or node_2 is None: return + # Swap the data values of the two nodes node_1.data, node_2.data = node_2.data, node_1.data @@ -48,8 +75,10 @@ if __name__ == "__main__": for i in range(5, 0, -1): ll.push(i) + print("Original Linked List:") ll.print_list() ll.swap_nodes(1, 4) - print("After swapping") + print("After swapping the nodes whose data is 1 and 4:") + ll.print_list() From dfdd78135df938d948ba3044aca628aca08886e7 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Wed, 4 Oct 2023 12:05:00 -0400 Subject: [PATCH 009/306] Fix mypy errors in circular_linked_list.py and swap_nodes.py (#9707) * updating DIRECTORY.md * Fix mypy errors in circular_linked_list.py * Fix mypy errors in swap_nodes.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 5 ++--- .../linked_list/circular_linked_list.py | 22 +++++++++++++------ data_structures/linked_list/swap_nodes.py | 4 ++-- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 9a913aa78..4f4cc423d 
100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -541,8 +541,8 @@ * [Basic Maths](maths/basic_maths.py) * [Binary Exp Mod](maths/binary_exp_mod.py) * [Binary Exponentiation](maths/binary_exponentiation.py) - * [Binary Exponentiation 2](maths/binary_exponentiation_2.py) * [Binary Exponentiation 3](maths/binary_exponentiation_3.py) + * [Binary Multiplication](maths/binary_multiplication.py) * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) * [Bisection](maths/bisection.py) @@ -557,8 +557,7 @@ * [Decimal Isolate](maths/decimal_isolate.py) * [Decimal To Fraction](maths/decimal_to_fraction.py) * [Dodecahedron](maths/dodecahedron.py) - * [Double Factorial Iterative](maths/double_factorial_iterative.py) - * [Double Factorial Recursive](maths/double_factorial_recursive.py) + * [Double Factorial](maths/double_factorial.py) * [Dual Number Automatic Differentiation](maths/dual_number_automatic_differentiation.py) * [Entropy](maths/entropy.py) * [Euclidean Distance](maths/euclidean_distance.py) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 72212f46b..ef6658733 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -20,8 +20,8 @@ class CircularLinkedList: """ Initialize an empty Circular Linked List. """ - self.head = None # Reference to the head (first node) - self.tail = None # Reference to the tail (last node) + self.head: Node | None = None # Reference to the head (first node) + self.tail: Node | None = None # Reference to the tail (last node) def __iter__(self) -> Iterator[Any]: """ @@ -30,7 +30,7 @@ class CircularLinkedList: The data of each node in the linked list. 
""" node = self.head - while self.head: + while node: yield node.data node = node.next if node == self.head: @@ -74,17 +74,20 @@ class CircularLinkedList: """ if index < 0 or index > len(self): raise IndexError("list index out of range.") - new_node = Node(data) + new_node: Node = Node(data) if self.head is None: new_node.next = new_node # First node points to itself self.tail = self.head = new_node elif index == 0: # Insert at the head new_node.next = self.head + assert self.tail is not None # List is not empty, tail exists self.head = self.tail.next = new_node else: - temp = self.head + temp: Node | None = self.head for _ in range(index - 1): + assert temp is not None temp = temp.next + assert temp is not None new_node.next = temp.next temp.next = new_node if index == len(self) - 1: # Insert at the tail @@ -120,16 +123,21 @@ class CircularLinkedList: """ if not 0 <= index < len(self): raise IndexError("list index out of range.") - delete_node = self.head + + assert self.head is not None and self.tail is not None + delete_node: Node = self.head if self.head == self.tail: # Just one node self.head = self.tail = None elif index == 0: # Delete head node + assert self.tail.next is not None self.tail.next = self.tail.next.next self.head = self.head.next else: - temp = self.head + temp: Node | None = self.head for _ in range(index - 1): + assert temp is not None temp = temp.next + assert temp is not None and temp.next is not None delete_node = temp.next temp.next = temp.next.next if index == len(self) - 1: # Delete at tail diff --git a/data_structures/linked_list/swap_nodes.py b/data_structures/linked_list/swap_nodes.py index da6aa07a7..31dcb02bf 100644 --- a/data_structures/linked_list/swap_nodes.py +++ b/data_structures/linked_list/swap_nodes.py @@ -11,7 +11,7 @@ class Node: """ self.data = data - self.next = None # Reference to the next node + self.next: Node | None = None # Reference to the next node class LinkedList: @@ -19,7 +19,7 @@ class LinkedList: """ 
Initialize an empty Linked List. """ - self.head = None # Reference to the head (first node) + self.head: Node | None = None # Reference to the head (first node) def print_list(self): """ From d74349793b613b0948608409a572426a9800c3a1 Mon Sep 17 00:00:00 2001 From: halfhearted <99018821+Arunsiva003@users.noreply.github.com> Date: Wed, 4 Oct 2023 22:09:28 +0530 Subject: [PATCH 010/306] Arunsiva003 patch 1 flatten tree (#9695) * infix to prefix missing feature added * infix to prefix missing feature added * infix to prefix missing feature added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * infix to prefix missing feature added (comments) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * infix to prefix missing feature added (comments) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * newly updated infix_to_prefix * newly updated infix_to_prefix_2 * newly updated infix_to_prefix_3 * from the beginning * Created flatten_binarytree_to_linkedlist.py * Update flatten_binarytree_to_linkedlist.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update flatten_binarytree_to_linkedlist.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update flatten_binarytree_to_linkedlist.py * Update flatten_binarytree_to_linkedlist.py * Update flatten_binarytree_to_linkedlist.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update flatten_binarytree_to_linkedlist.py (space added) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update flatten_binarytree_to_linkedlist.py space added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update 
flatten_binarytree_to_linkedlist.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * flatten binary tree to linked list - 1 * flatten binary tree to linked list final * flatten binary tree to linked list final * review updated * Update flatten_binarytree_to_linkedlist.py * Update .pre-commit-config.yaml * Update flatten_binarytree_to_linkedlist.py * Update flatten_binarytree_to_linkedlist.py --------- Co-authored-by: ArunSiva Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../flatten_binarytree_to_linkedlist.py | 138 ++++++++++++++++++ 1 file changed, 138 insertions(+) create mode 100644 data_structures/binary_tree/flatten_binarytree_to_linkedlist.py diff --git a/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py b/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py new file mode 100644 index 000000000..8820a509e --- /dev/null +++ b/data_structures/binary_tree/flatten_binarytree_to_linkedlist.py @@ -0,0 +1,138 @@ +""" +Binary Tree Flattening Algorithm + +This code defines an algorithm to flatten a binary tree into a linked list +represented using the right pointers of the tree nodes. It uses in-place +flattening and demonstrates the flattening process along with a display +function to visualize the flattened linked list. +https://www.geeksforgeeks.org/flatten-a-binary-tree-into-linked-list + +Author: Arunkumar A +Date: 04/09/2023 +""" +from __future__ import annotations + + +class TreeNode: + """ + A TreeNode has data variable and pointers to TreeNode objects + for its left and right children. + """ + + def __init__(self, data: int) -> None: + self.data = data + self.left: TreeNode | None = None + self.right: TreeNode | None = None + + +def build_tree() -> TreeNode: + """ + Build and return a sample binary tree. + + Returns: + TreeNode: The root of the binary tree. 
+ + Examples: + >>> root = build_tree() + >>> root.data + 1 + >>> root.left.data + 2 + >>> root.right.data + 5 + >>> root.left.left.data + 3 + >>> root.left.right.data + 4 + >>> root.right.right.data + 6 + """ + root = TreeNode(1) + root.left = TreeNode(2) + root.right = TreeNode(5) + root.left.left = TreeNode(3) + root.left.right = TreeNode(4) + root.right.right = TreeNode(6) + return root + + +def flatten(root: TreeNode | None) -> None: + """ + Flatten a binary tree into a linked list in-place, where the linked list is + represented using the right pointers of the tree nodes. + + Args: + root (TreeNode): The root of the binary tree to be flattened. + + Examples: + >>> root = TreeNode(1) + >>> root.left = TreeNode(2) + >>> root.right = TreeNode(5) + >>> root.left.left = TreeNode(3) + >>> root.left.right = TreeNode(4) + >>> root.right.right = TreeNode(6) + >>> flatten(root) + >>> root.data + 1 + >>> root.right.right is None + False + >>> root.right.right = TreeNode(3) + >>> root.right.right.right is None + True + """ + if not root: + return + + # Flatten the left subtree + flatten(root.left) + + # Save the right subtree + right_subtree = root.right + + # Make the left subtree the new right subtree + root.right = root.left + root.left = None + + # Find the end of the new right subtree + current = root + while current.right: + current = current.right + + # Append the original right subtree to the end + current.right = right_subtree + + # Flatten the updated right subtree + flatten(right_subtree) + + +def display_linked_list(root: TreeNode | None) -> None: + """ + Display the flattened linked list. + + Args: + root (TreeNode | None): The root of the flattened linked list. 
+ + Examples: + >>> root = TreeNode(1) + >>> root.right = TreeNode(2) + >>> root.right.right = TreeNode(3) + >>> display_linked_list(root) + 1 2 3 + >>> root = None + >>> display_linked_list(root) + + """ + current = root + while current: + if current.right is None: + print(current.data, end="") + break + print(current.data, end=" ") + current = current.right + + +if __name__ == "__main__": + print("Flattened Linked List:") + root = build_tree() + flatten(root) + display_linked_list(root) From 922d6a88b3be2ff0dd69dd47d90e40aa95afd105 Mon Sep 17 00:00:00 2001 From: Bama Charan Chhandogi Date: Wed, 4 Oct 2023 22:51:46 +0530 Subject: [PATCH 011/306] add median of matrix (#9363) * add median of matrix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix formating * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- matrix/median_matrix.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 matrix/median_matrix.py diff --git a/matrix/median_matrix.py b/matrix/median_matrix.py new file mode 100644 index 000000000..116e609a5 --- /dev/null +++ b/matrix/median_matrix.py @@ -0,0 +1,38 @@ +""" +https://en.wikipedia.org/wiki/Median +""" + + +def median(matrix: list[list[int]]) -> int: + """ + Calculate the median of a sorted matrix. + + Args: + matrix: A 2D matrix of integers. + + Returns: + The median value of the matrix. 
+ + Examples: + >>> matrix = [[1, 3, 5], [2, 6, 9], [3, 6, 9]] + >>> median(matrix) + 5 + + >>> matrix = [[1, 2, 3], [4, 5, 6]] + >>> median(matrix) + 3 + """ + # Flatten the matrix into a sorted 1D list + linear = sorted(num for row in matrix for num in row) + + # Calculate the middle index + mid = (len(linear) - 1) // 2 + + # Return the median + return linear[mid] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d5806258d4f9eb0e5652e1edfac0613aacb71fb6 Mon Sep 17 00:00:00 2001 From: Bama Charan Chhandogi Date: Wed, 4 Oct 2023 23:48:59 +0530 Subject: [PATCH 012/306] add median of two sorted array (#9386) * add median of two sorted array * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix syntax * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix syntax * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * improve code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add documentation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/arrays/median_two_array.py | 61 ++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 data_structures/arrays/median_two_array.py diff --git a/data_structures/arrays/median_two_array.py b/data_structures/arrays/median_two_array.py new file mode 100644 index 000000000..972b0ee44 --- /dev/null +++ b/data_structures/arrays/median_two_array.py @@ -0,0 +1,61 @@ +""" +https://www.enjoyalgorithms.com/blog/median-of-two-sorted-arrays +""" + + +def find_median_sorted_arrays(nums1: list[int], nums2: 
list[int]) -> float: + """ + Find the median of two arrays. + + Args: + nums1: The first array. + nums2: The second array. + + Returns: + The median of the two arrays. + + Examples: + >>> find_median_sorted_arrays([1, 3], [2]) + 2.0 + + >>> find_median_sorted_arrays([1, 2], [3, 4]) + 2.5 + + >>> find_median_sorted_arrays([0, 0], [0, 0]) + 0.0 + + >>> find_median_sorted_arrays([], []) + Traceback (most recent call last): + ... + ValueError: Both input arrays are empty. + + >>> find_median_sorted_arrays([], [1]) + 1.0 + + >>> find_median_sorted_arrays([-1000], [1000]) + 0.0 + + >>> find_median_sorted_arrays([-1.1, -2.2], [-3.3, -4.4]) + -2.75 + """ + if not nums1 and not nums2: + raise ValueError("Both input arrays are empty.") + + # Merge the arrays into a single sorted array. + merged = sorted(nums1 + nums2) + total = len(merged) + + if total % 2 == 1: # If the total number of elements is odd + return float(merged[total // 2]) # then return the middle element + + # If the total number of elements is even, calculate + # the average of the two middle elements as the median. + middle1 = merged[total // 2 - 1] + middle2 = merged[total // 2] + return (float(middle1) + float(middle2)) / 2.0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c16d2f8865c8ce28ae6d4d815d3f6c3008e94f74 Mon Sep 17 00:00:00 2001 From: Muhammad Umer Farooq <115654418+Muhammadummerr@users.noreply.github.com> Date: Wed, 4 Oct 2023 23:43:17 +0500 Subject: [PATCH 013/306] UPDATED rat_in_maze.py (#9148) * UPDATED rat_in_maze.py * Update reddit.py in Webprogramming b/c it was causing error in pre-commit tests while raising PR. * UPDATED rat_in_maze.py * fixed return type to only maze,otherwise raise valueError. * fixed whitespaces error,improved matrix visual. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated. 
* Try * updated * updated * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- backtracking/rat_in_maze.py | 181 ++++++++++++++++++++++++++---------- 1 file changed, 130 insertions(+), 51 deletions(-) diff --git a/backtracking/rat_in_maze.py b/backtracking/rat_in_maze.py index 7bde886dd..626c83cb4 100644 --- a/backtracking/rat_in_maze.py +++ b/backtracking/rat_in_maze.py @@ -1,91 +1,164 @@ from __future__ import annotations -def solve_maze(maze: list[list[int]]) -> bool: +def solve_maze( + maze: list[list[int]], + source_row: int, + source_column: int, + destination_row: int, + destination_column: int, +) -> list[list[int]]: """ This method solves the "rat in maze" problem. - In this problem we have some n by n matrix, a start point and an end point. - We want to go from the start to the end. In this matrix zeroes represent walls - and ones paths we can use. Parameters : - maze(2D matrix) : maze + - maze: A two dimensional matrix of zeros and ones. + - source_row: The row index of the starting point. + - source_column: The column index of the starting point. + - destination_row: The row index of the destination point. + - destination_column: The column index of the destination point. Returns: - Return: True if the maze has a solution or False if it does not. + - solution: A 2D matrix representing the solution path if it exists. + Raises: + - ValueError: If no solution exists or if the source or + destination coordinates are invalid. + Description: + This method navigates through a maze represented as an n by n matrix, + starting from a specified source cell and + aiming to reach a destination cell. + The maze consists of walls (1s) and open paths (0s). + By providing custom row and column values, the source and destination + cells can be adjusted. >>> maze = [[0, 1, 0, 1, 1], ... [0, 0, 0, 0, 0], ... [1, 0, 1, 0, 1], ... [0, 0, 1, 0, 0], ... 
[1, 0, 0, 1, 0]] - >>> solve_maze(maze) - [1, 0, 0, 0, 0] - [1, 1, 1, 1, 0] - [0, 0, 0, 1, 0] - [0, 0, 0, 1, 1] - [0, 0, 0, 0, 1] - True + >>> solve_maze(maze,0,0,len(maze)-1,len(maze)-1) # doctest: +NORMALIZE_WHITESPACE + [[0, 1, 1, 1, 1], + [0, 0, 0, 0, 1], + [1, 1, 1, 0, 1], + [1, 1, 1, 0, 0], + [1, 1, 1, 1, 0]] + + Note: + In the output maze, the zeros (0s) represent one of the possible + paths from the source to the destination. >>> maze = [[0, 1, 0, 1, 1], ... [0, 0, 0, 0, 0], ... [0, 0, 0, 0, 1], ... [0, 0, 0, 0, 0], ... [0, 0, 0, 0, 0]] - >>> solve_maze(maze) - [1, 0, 0, 0, 0] - [1, 0, 0, 0, 0] - [1, 0, 0, 0, 0] - [1, 0, 0, 0, 0] - [1, 1, 1, 1, 1] - True + >>> solve_maze(maze,0,0,len(maze)-1,len(maze)-1) # doctest: +NORMALIZE_WHITESPACE + [[0, 1, 1, 1, 1], + [0, 1, 1, 1, 1], + [0, 1, 1, 1, 1], + [0, 1, 1, 1, 1], + [0, 0, 0, 0, 0]] >>> maze = [[0, 0, 0], ... [0, 1, 0], ... [1, 0, 0]] - >>> solve_maze(maze) - [1, 1, 1] - [0, 0, 1] - [0, 0, 1] - True + >>> solve_maze(maze,0,0,len(maze)-1,len(maze)-1) # doctest: +NORMALIZE_WHITESPACE + [[0, 0, 0], + [1, 1, 0], + [1, 1, 0]] - >>> maze = [[0, 1, 0], + >>> maze = [[1, 0, 0], ... [0, 1, 0], ... [1, 0, 0]] - >>> solve_maze(maze) - No solution exists! - False + >>> solve_maze(maze,0,1,len(maze)-1,len(maze)-1) # doctest: +NORMALIZE_WHITESPACE + [[1, 0, 0], + [1, 1, 0], + [1, 1, 0]] + + >>> maze = [[1, 1, 0, 0, 1, 0, 0, 1], + ... [1, 0, 1, 0, 0, 1, 1, 1], + ... [0, 1, 0, 1, 0, 0, 1, 0], + ... [1, 1, 1, 0, 0, 1, 0, 1], + ... [0, 1, 0, 0, 1, 0, 1, 1], + ... [0, 0, 0, 1, 1, 1, 0, 1], + ... [0, 1, 0, 1, 0, 1, 1, 1], + ... [1, 1, 0, 0, 0, 0, 0, 1]] + >>> solve_maze(maze,0,2,len(maze)-1,2) # doctest: +NORMALIZE_WHITESPACE + [[1, 1, 0, 0, 1, 1, 1, 1], + [1, 1, 1, 0, 0, 1, 1, 1], + [1, 1, 1, 1, 0, 1, 1, 1], + [1, 1, 1, 0, 0, 1, 1, 1], + [1, 1, 0, 0, 1, 1, 1, 1], + [1, 1, 0, 1, 1, 1, 1, 1], + [1, 1, 0, 1, 1, 1, 1, 1], + [1, 1, 0, 1, 1, 1, 1, 1]] + >>> maze = [[1, 0, 0], + ... [0, 1, 1], + ... 
[1, 0, 1]] + >>> solve_maze(maze,0,1,len(maze)-1,len(maze)-1) + Traceback (most recent call last): + ... + ValueError: No solution exists! + + >>> maze = [[0, 0], + ... [1, 1]] + >>> solve_maze(maze,0,0,len(maze)-1,len(maze)-1) + Traceback (most recent call last): + ... + ValueError: No solution exists! >>> maze = [[0, 1], ... [1, 0]] - >>> solve_maze(maze) - No solution exists! - False + >>> solve_maze(maze,2,0,len(maze)-1,len(maze)-1) + Traceback (most recent call last): + ... + ValueError: Invalid source or destination coordinates + + >>> maze = [[1, 0, 0], + ... [0, 1, 0], + ... [1, 0, 0]] + >>> solve_maze(maze,0,1,len(maze),len(maze)-1) + Traceback (most recent call last): + ... + ValueError: Invalid source or destination coordinates """ size = len(maze) + # Check if source and destination coordinates are Invalid. + if not (0 <= source_row <= size - 1 and 0 <= source_column <= size - 1) or ( + not (0 <= destination_row <= size - 1 and 0 <= destination_column <= size - 1) + ): + raise ValueError("Invalid source or destination coordinates") # We need to create solution object to save path. - solutions = [[0 for _ in range(size)] for _ in range(size)] - solved = run_maze(maze, 0, 0, solutions) + solutions = [[1 for _ in range(size)] for _ in range(size)] + solved = run_maze( + maze, source_row, source_column, destination_row, destination_column, solutions + ) if solved: - print("\n".join(str(row) for row in solutions)) + return solutions else: - print("No solution exists!") - return solved + raise ValueError("No solution exists!") -def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool: +def run_maze( + maze: list[list[int]], + i: int, + j: int, + destination_row: int, + destination_column: int, + solutions: list[list[int]], +) -> bool: """ This method is recursive starting from (i, j) and going in one of four directions: up, down, left, right. If a path is found to destination it returns True otherwise it returns False. 
- Parameters: - maze(2D matrix) : maze + Parameters + maze: A two dimensional matrix of zeros and ones. i, j : coordinates of matrix - solutions(2D matrix) : solutions + solutions: A two dimensional matrix of solutions. Returns: Boolean if path is found True, Otherwise False. """ size = len(maze) # Final check point. - if i == j == (size - 1): - solutions[i][j] = 1 + if i == destination_row and j == destination_column and maze[i][j] == 0: + solutions[i][j] = 0 return True lower_flag = (not i < 0) and (not j < 0) # Check lower bounds @@ -93,21 +166,27 @@ def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) if lower_flag and upper_flag: # check for already visited and block points. - block_flag = (not solutions[i][j]) and (not maze[i][j]) + block_flag = (solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited - solutions[i][j] = 1 + solutions[i][j] = 0 # check for directions if ( - run_maze(maze, i + 1, j, solutions) - or run_maze(maze, i, j + 1, solutions) - or run_maze(maze, i - 1, j, solutions) - or run_maze(maze, i, j - 1, solutions) + run_maze(maze, i + 1, j, destination_row, destination_column, solutions) + or run_maze( + maze, i, j + 1, destination_row, destination_column, solutions + ) + or run_maze( + maze, i - 1, j, destination_row, destination_column, solutions + ) + or run_maze( + maze, i, j - 1, destination_row, destination_column, solutions + ) ): return True - solutions[i][j] = 0 + solutions[i][j] = 1 return False return False @@ -115,4 +194,4 @@ def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) if __name__ == "__main__": import doctest - doctest.testmod() + doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE) From 26d650ec2820e265e69c88608959a3e18f28c5d5 Mon Sep 17 00:00:00 2001 From: piyush-poddar <143445461+piyush-poddar@users.noreply.github.com> Date: Thu, 5 Oct 2023 01:58:19 +0530 Subject: [PATCH 014/306] Moved relu.py from maths/ to neural_network/activation_functions 
(#9753) * Moved file relu.py from maths/ to neural_network/activation_functions * Renamed relu.py to rectified_linear_unit.py * Renamed relu.py to rectified_linear_unit.py in DIRECTORY.md --- DIRECTORY.md | 2 +- .../activation_functions/rectified_linear_unit.py | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename maths/relu.py => neural_network/activation_functions/rectified_linear_unit.py (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 4f4cc423d..696a059bb 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -639,7 +639,6 @@ * [Quadratic Equations Complex Numbers](maths/quadratic_equations_complex_numbers.py) * [Radians](maths/radians.py) * [Radix2 Fft](maths/radix2_fft.py) - * [Relu](maths/relu.py) * [Remove Digit](maths/remove_digit.py) * [Runge Kutta](maths/runge_kutta.py) * [Segmented Sieve](maths/segmented_sieve.py) @@ -710,6 +709,7 @@ * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) + * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) diff --git a/maths/relu.py b/neural_network/activation_functions/rectified_linear_unit.py similarity index 100% rename from maths/relu.py rename to neural_network/activation_functions/rectified_linear_unit.py From 6a391d113d8f0efdd69e69c8da7b44766594449a Mon Sep 17 00:00:00 2001 From: Raghav <83136390+Raghav-Bell@users.noreply.github.com> Date: Thu, 5 Oct 2023 04:46:19 +0530 Subject: [PATCH 015/306] Added Photoelectric effect equation (#9666) * Added Photoelectric effect equation Photoelectric effect is one of the demonstration of 
quanta of energy. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed doctest Co-authored-by: Rohan Anand <96521078+rohan472000@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Rohan Anand <96521078+rohan472000@users.noreply.github.com> --- physics/photoelectric_effect.py | 67 +++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 physics/photoelectric_effect.py diff --git a/physics/photoelectric_effect.py b/physics/photoelectric_effect.py new file mode 100644 index 000000000..3a0138ffe --- /dev/null +++ b/physics/photoelectric_effect.py @@ -0,0 +1,67 @@ +""" +The photoelectric effect is the emission of electrons when electromagnetic radiation , +such as light, hits a material. Electrons emitted in this manner are called +photoelectrons. + +In 1905, Einstein proposed a theory of the photoelectric effect using a concept that +light consists of tiny packets of energy known as photons or light quanta. Each packet +carries energy hv that is proportional to the frequency v of the corresponding +electromagnetic wave. The proportionality constant h has become known as the +Planck constant. In the range of kinetic energies of the electrons that are removed from +their varying atomic bindings by the absorption of a photon of energy hv, the highest +kinetic energy K_max is : + +K_max = hv-W + +Here, W is the minimum energy required to remove an electron from the surface of the +material. 
It is called the work function of the surface + +Reference: https://en.wikipedia.org/wiki/Photoelectric_effect + +""" + +PLANCK_CONSTANT_JS = 6.6261 * pow(10, -34) # in SI (Js) +PLANCK_CONSTANT_EVS = 4.1357 * pow(10, -15) # in eVs + + +def maximum_kinetic_energy( + frequency: float, work_function: float, in_ev: bool = False +) -> float: + """ + Calculates the maximum kinetic energy of emitted electron from the surface. + if the maximum kinetic energy is zero then no electron will be emitted + or given electromagnetic wave frequency is small. + + frequency (float): Frequency of electromagnetic wave. + work_function (float): Work function of the surface. + in_ev (optional)(bool): Pass True if values are in eV. + + Usage example: + >>> maximum_kinetic_energy(1000000,2) + 0 + >>> maximum_kinetic_energy(1000000,2,True) + 0 + >>> maximum_kinetic_energy(10000000000000000,2,True) + 39.357000000000006 + >>> maximum_kinetic_energy(-9,20) + Traceback (most recent call last): + ... + ValueError: Frequency can't be negative. + + >>> maximum_kinetic_energy(1000,"a") + Traceback (most recent call last): + ... 
+ TypeError: unsupported operand type(s) for -: 'float' and 'str' + + """ + if frequency < 0: + raise ValueError("Frequency can't be negative.") + if in_ev: + return max(PLANCK_CONSTANT_EVS * frequency - work_function, 0) + return max(PLANCK_CONSTANT_JS * frequency - work_function, 0) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2fd43c0f7ff1d7f72fa65a528ddabccf90c89a0d Mon Sep 17 00:00:00 2001 From: Tauseef Hilal Tantary Date: Thu, 5 Oct 2023 05:03:12 +0530 Subject: [PATCH 016/306] [New Algorithm] - Bell Numbers (#9324) * Add Bell Numbers * Use descriptive variable names * Add type hints * Fix failing tests --- maths/bell_numbers.py | 78 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 maths/bell_numbers.py diff --git a/maths/bell_numbers.py b/maths/bell_numbers.py new file mode 100644 index 000000000..660ec6e6a --- /dev/null +++ b/maths/bell_numbers.py @@ -0,0 +1,78 @@ +""" +Bell numbers represent the number of ways to partition a set into non-empty +subsets. This module provides functions to calculate Bell numbers for sets of +integers. In other words, the first (n + 1) Bell numbers. + +For more information about Bell numbers, refer to: +https://en.wikipedia.org/wiki/Bell_number +""" + + +def bell_numbers(max_set_length: int) -> list[int]: + """ + Calculate Bell numbers for the sets of lengths from 0 to max_set_length. + In other words, calculate first (max_set_length + 1) Bell numbers. + + Args: + max_set_length (int): The maximum length of the sets for which + Bell numbers are calculated. + + Returns: + list: A list of Bell numbers for sets of lengths from 0 to max_set_length. 
+ + Examples: + >>> bell_numbers(0) + [1] + >>> bell_numbers(1) + [1, 1] + >>> bell_numbers(5) + [1, 1, 2, 5, 15, 52] + """ + if max_set_length < 0: + raise ValueError("max_set_length must be non-negative") + + bell = [0] * (max_set_length + 1) + bell[0] = 1 + + for i in range(1, max_set_length + 1): + for j in range(i): + bell[i] += _binomial_coefficient(i - 1, j) * bell[j] + + return bell + + +def _binomial_coefficient(total_elements: int, elements_to_choose: int) -> int: + """ + Calculate the binomial coefficient C(total_elements, elements_to_choose) + + Args: + total_elements (int): The total number of elements. + elements_to_choose (int): The number of elements to choose. + + Returns: + int: The binomial coefficient C(total_elements, elements_to_choose). + + Examples: + >>> _binomial_coefficient(5, 2) + 10 + >>> _binomial_coefficient(6, 3) + 20 + """ + if elements_to_choose in {0, total_elements}: + return 1 + + if elements_to_choose > total_elements - elements_to_choose: + elements_to_choose = total_elements - elements_to_choose + + coefficient = 1 + for i in range(elements_to_choose): + coefficient *= total_elements - i + coefficient //= i + 1 + + return coefficient + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 1fda96b7044d9fa08c84f09f54a345ebf052b2eb Mon Sep 17 00:00:00 2001 From: Sanket Kittad <86976526+sanketkittad@users.noreply.github.com> Date: Thu, 5 Oct 2023 05:10:14 +0530 Subject: [PATCH 017/306] Palindromic (#9288) * added longest palindromic subsequence * removed * added longest palindromic subsequence * added longest palindromic subsequence link * added comments --- .../longest_palindromic_subsequence.py | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 dynamic_programming/longest_palindromic_subsequence.py diff --git a/dynamic_programming/longest_palindromic_subsequence.py b/dynamic_programming/longest_palindromic_subsequence.py new file mode 100644 index 000000000..a60d95e46 --- /dev/null 
+++ b/dynamic_programming/longest_palindromic_subsequence.py @@ -0,0 +1,44 @@ +""" +author: Sanket Kittad +Given a string s, find the longest palindromic subsequence's length in s. +Input: s = "bbbab" +Output: 4 +Explanation: One possible longest palindromic subsequence is "bbbb". +Leetcode link: https://leetcode.com/problems/longest-palindromic-subsequence/description/ +""" + + +def longest_palindromic_subsequence(input_string: str) -> int: + """ + This function returns the longest palindromic subsequence in a string + >>> longest_palindromic_subsequence("bbbab") + 4 + >>> longest_palindromic_subsequence("bbabcbcab") + 7 + """ + n = len(input_string) + rev = input_string[::-1] + m = len(rev) + dp = [[-1] * (m + 1) for i in range(n + 1)] + for i in range(n + 1): + dp[i][0] = 0 + for i in range(m + 1): + dp[0][i] = 0 + + # create and initialise dp array + for i in range(1, n + 1): + for j in range(1, m + 1): + # If characters at i and j are the same + # include them in the palindromic subsequence + if input_string[i - 1] == rev[j - 1]: + dp[i][j] = 1 + dp[i - 1][j - 1] + else: + dp[i][j] = max(dp[i - 1][j], dp[i][j - 1]) + + return dp[n][m] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 935d1d3225ede4c0650165d5dfd8f5eb35b54f5e Mon Sep 17 00:00:00 2001 From: Vipin Karthic <143083087+vipinkarthic@users.noreply.github.com> Date: Thu, 5 Oct 2023 11:27:55 +0530 Subject: [PATCH 018/306] Added Mirror Formulae Equation (#9717) * Python mirror_formulae.py is added to the repository * Changes done after reading readme.md * Changes for running doctest on all platforms * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change 2 for Doctests * Changes for doctest 2 * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 9 ++- 
physics/mirror_formulae.py | 127 +++++++++++++++++++++++++++++++++++++ 2 files changed, 135 insertions(+), 1 deletion(-) create mode 100644 physics/mirror_formulae.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 696a059bb..5f23cbd6c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -170,6 +170,7 @@ ## Data Structures * Arrays + * [Median Two Array](data_structures/arrays/median_two_array.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) @@ -185,6 +186,7 @@ * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) * [Distribute Coins](data_structures/binary_tree/distribute_coins.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) + * [Flatten Binarytree To Linkedlist](data_structures/binary_tree/flatten_binarytree_to_linkedlist.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) * [Is Bst](data_structures/binary_tree/is_bst.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) @@ -324,6 +326,7 @@ * [Longest Common Substring](dynamic_programming/longest_common_substring.py) * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) + * [Longest Palindromic Subsequence](dynamic_programming/longest_palindromic_subsequence.py) * [Longest Sub Array](dynamic_programming/longest_sub_array.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) @@ -539,6 +542,7 @@ * [Average Mode](maths/average_mode.py) * [Bailey Borwein Plouffe](maths/bailey_borwein_plouffe.py) * [Basic Maths](maths/basic_maths.py) + * [Bell Numbers](maths/bell_numbers.py) * [Binary Exp Mod](maths/binary_exp_mod.py) * [Binary 
Exponentiation](maths/binary_exponentiation.py) * [Binary Exponentiation 3](maths/binary_exponentiation_3.py) @@ -690,6 +694,7 @@ * [Matrix Class](matrix/matrix_class.py) * [Matrix Operation](matrix/matrix_operation.py) * [Max Area Of Island](matrix/max_area_of_island.py) + * [Median Matrix](matrix/median_matrix.py) * [Nth Fibonacci Using Matrix Exponentiation](matrix/nth_fibonacci_using_matrix_exponentiation.py) * [Pascal Triangle](matrix/pascal_triangle.py) * [Rotate Matrix](matrix/rotate_matrix.py) @@ -708,8 +713,8 @@ * Activation Functions * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) - * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) + * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) @@ -756,9 +761,11 @@ * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [Malus Law](physics/malus_law.py) + * [Mirror Formulae](physics/mirror_formulae.py) * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) + * [Photoelectric Effect](physics/photoelectric_effect.py) * [Potential Energy](physics/potential_energy.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) diff --git a/physics/mirror_formulae.py b/physics/mirror_formulae.py new file mode 100644 index 
000000000..f1b4ac2c7 --- /dev/null +++ b/physics/mirror_formulae.py @@ -0,0 +1,127 @@ +""" +This module contains the functions to calculate the focal length, object distance +and image distance of a mirror. + +The mirror formula is an equation that relates the object distance (u), +image distance (v), and focal length (f) of a spherical mirror. +It is commonly used in optics to determine the position and characteristics +of an image formed by a mirror. It is expressed using the formulae : + +------------------- +| 1/f = 1/v + 1/u | +------------------- + +Where, +f = Focal length of the spherical mirror (metre) +v = Image distance from the mirror (metre) +u = Object distance from the mirror (metre) + + +The signs of the distances are taken with respect to the sign convention. +The sign convention is as follows: + 1) Object is always placed to the left of mirror + 2) Distances measured in the direction of the incident ray are positive + and the distances measured in the direction opposite to that of the incident + rays are negative. + 3) All distances are measured from the pole of the mirror. + + +There are a few assumptions that are made while using the mirror formulae. +They are as follows: + 1) Thin Mirror: The mirror is assumed to be thin, meaning its thickness is + negligible compared to its radius of curvature. This assumption allows + us to treat the mirror as a two-dimensional surface. + 2) Spherical Mirror: The mirror is assumed to have a spherical shape. While this + assumption may not hold exactly for all mirrors, it is a reasonable approximation + for most practical purposes. + 3) Small Angles: The angles involved in the derivation are assumed to be small. + This assumption allows us to use the small-angle approximation, where the tangent + of a small angle is approximately equal to the angle itself. It simplifies the + calculations and makes the derivation more manageable. 
+ 4) Paraxial Rays: The mirror formula is derived using paraxial rays, which are + rays that are close to the principal axis and make small angles with it. This + assumption ensures that the rays are close enough to the principal axis, making the + calculations more accurate. + 5) Reflection and Refraction Laws: The derivation assumes that the laws of + reflection and refraction hold. + These laws state that the angle of incidence is equal to the angle of reflection + for reflection, and the incident and refracted rays lie in the same plane and + obey Snell's law for refraction. + +(Description and Assumptions adapted from +https://www.collegesearch.in/articles/mirror-formula-derivation) + +(Sign Convention adapted from +https://www.toppr.com/ask/content/concept/sign-convention-for-mirrors-210189/) + + +""" + + +def focal_length(distance_of_object: float, distance_of_image: float) -> float: + """ + >>> from math import isclose + >>> isclose(focal_length(10, 20), 6.66666666666666) + True + >>> from math import isclose + >>> isclose(focal_length(9.5, 6.7), 3.929012346) + True + >>> focal_length(0, 20) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. + """ + + if distance_of_object == 0 or distance_of_image == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." + ) + focal_length = 1 / ((1 / distance_of_object) + (1 / distance_of_image)) + return focal_length + + +def object_distance(focal_length: float, distance_of_image: float) -> float: + """ + >>> from math import isclose + >>> isclose(object_distance(30, 20), -60.0) + True + >>> from math import isclose + >>> isclose(object_distance(10.5, 11.7), 102.375) + True + >>> object_distance(90, 0) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. 
+ """ + + if distance_of_image == 0 or focal_length == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." + ) + object_distance = 1 / ((1 / focal_length) - (1 / distance_of_image)) + return object_distance + + +def image_distance(focal_length: float, distance_of_object: float) -> float: + """ + >>> from math import isclose + >>> isclose(image_distance(10, 40), 13.33333333) + True + >>> from math import isclose + >>> isclose(image_distance(1.5, 6.7), 1.932692308) + True + >>> image_distance(0, 0) + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. + """ + + if distance_of_object == 0 or focal_length == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." + ) + image_distance = 1 / ((1 / focal_length) - (1 / distance_of_object)) + return image_distance From 4b6301d4ce91638d39689f7be7db797f99623964 Mon Sep 17 00:00:00 2001 From: rtang09 <49603415+rtang09@users.noreply.github.com> Date: Wed, 4 Oct 2023 23:12:08 -0700 Subject: [PATCH 019/306] Fletcher 16 (#9775) * Add files via upload * Update fletcher16.py * Update fletcher16.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fletcher16.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fletcher16.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fletcher16.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- hashes/fletcher16.py | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 hashes/fletcher16.py diff --git a/hashes/fletcher16.py b/hashes/fletcher16.py new file mode 100644 index 000000000..7c23c98d7 --- /dev/null +++ 
b/hashes/fletcher16.py @@ -0,0 +1,36 @@ +""" +The Fletcher checksum is an algorithm for computing a position-dependent +checksum devised by John G. Fletcher (1934–2012) at Lawrence Livermore Labs +in the late 1970s.[1] The objective of the Fletcher checksum was to +provide error-detection properties approaching those of a cyclic +redundancy check but with the lower computational effort associated +with summation techniques. + +Source: https://en.wikipedia.org/wiki/Fletcher%27s_checksum +""" + + +def fletcher16(text: str) -> int: + """ + Loop through every character in the data and add to two sums. + + >>> fletcher16('hello world') + 6752 + >>> fletcher16('onethousandfourhundredthirtyfour') + 28347 + >>> fletcher16('The quick brown fox jumps over the lazy dog.') + 5655 + """ + data = bytes(text, "ascii") + sum1 = 0 + sum2 = 0 + for character in data: + sum1 = (sum1 + character) % 255 + sum2 = (sum1 + sum2) % 255 + return (sum2 << 8) | sum1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0d324de7ab9c354d958fd93f6046d0111014d95a Mon Sep 17 00:00:00 2001 From: Vipin Karthic <143083087+vipinkarthic@users.noreply.github.com> Date: Thu, 5 Oct 2023 13:18:15 +0530 Subject: [PATCH 020/306] Doctest Error Correction of mirror_formulae.py (#9782) * Python mirror_formulae.py is added to the repository * Changes done after reading readme.md * Changes for running doctest on all platforms * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change 2 for Doctests * Changes for doctest 2 * updating DIRECTORY.md * Doctest whitespace error rectification to mirror_formulae.py * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + physics/mirror_formulae.py | 6 +++--- 2 files changed, 4 insertions(+), 3 
deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 5f23cbd6c..b0ba3c385 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -469,6 +469,7 @@ * [Djb2](hashes/djb2.py) * [Elf](hashes/elf.py) * [Enigma Machine](hashes/enigma_machine.py) + * [Fletcher16](hashes/fletcher16.py) * [Hamming Code](hashes/hamming_code.py) * [Luhn](hashes/luhn.py) * [Md5](hashes/md5.py) diff --git a/physics/mirror_formulae.py b/physics/mirror_formulae.py index f1b4ac2c7..7efc52438 100644 --- a/physics/mirror_formulae.py +++ b/physics/mirror_formulae.py @@ -66,7 +66,7 @@ def focal_length(distance_of_object: float, distance_of_image: float) -> float: >>> from math import isclose >>> isclose(focal_length(9.5, 6.7), 3.929012346) True - >>> focal_length(0, 20) + >>> focal_length(0, 20) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Invalid inputs. Enter non zero values with respect @@ -89,7 +89,7 @@ def object_distance(focal_length: float, distance_of_image: float) -> float: >>> from math import isclose >>> isclose(object_distance(10.5, 11.7), 102.375) True - >>> object_distance(90, 0) + >>> object_distance(90, 0) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Invalid inputs. Enter non zero values with respect @@ -112,7 +112,7 @@ def image_distance(focal_length: float, distance_of_object: float) -> float: >>> from math import isclose >>> isclose(image_distance(1.5, 6.7), 1.932692308) True - >>> image_distance(0, 0) + >>> image_distance(0, 0) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Invalid inputs. 
Enter non zero values with respect From f3be0ae9e60a0ed2185e55c0758ddf401e604f8c Mon Sep 17 00:00:00 2001 From: Naman <37952726+namansharma18899@users.noreply.github.com> Date: Thu, 5 Oct 2023 14:07:23 +0530 Subject: [PATCH 021/306] Added largest pow of 2 le num (#9374) --- bit_manipulation/largest_pow_of_two_le_num.py | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 bit_manipulation/largest_pow_of_two_le_num.py diff --git a/bit_manipulation/largest_pow_of_two_le_num.py b/bit_manipulation/largest_pow_of_two_le_num.py new file mode 100644 index 000000000..6ef827312 --- /dev/null +++ b/bit_manipulation/largest_pow_of_two_le_num.py @@ -0,0 +1,60 @@ +""" +Author : Naman Sharma +Date : October 2, 2023 + +Task: +To Find the largest power of 2 less than or equal to a given number. + +Implementation notes: Use bit manipulation. +We start from 1 & left shift the set bit to check if (res<<1)<=number. +Each left bit shift represents a pow of 2. + +For example: +number: 15 +res: 1 0b1 + 2 0b10 + 4 0b100 + 8 0b1000 + 16 0b10000 (Exit) +""" + + +def largest_pow_of_two_le_num(number: int) -> int: + """ + Return the largest power of two less than or equal to a number. + + >>> largest_pow_of_two_le_num(0) + 0 + >>> largest_pow_of_two_le_num(1) + 1 + >>> largest_pow_of_two_le_num(-1) + 0 + >>> largest_pow_of_two_le_num(3) + 2 + >>> largest_pow_of_two_le_num(15) + 8 + >>> largest_pow_of_two_le_num(99) + 64 + >>> largest_pow_of_two_le_num(178) + 128 + >>> largest_pow_of_two_le_num(999999) + 524288 + >>> largest_pow_of_two_le_num(99.9) + Traceback (most recent call last): + ... 
+ TypeError: Input value must be a 'int' type + """ + if isinstance(number, float): + raise TypeError("Input value must be a 'int' type") + if number <= 0: + return 0 + res = 1 + while (res << 1) <= number: + res <<= 1 + return res + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From e29024d14ade8ff4cdb43d1da6a7738f44685e5e Mon Sep 17 00:00:00 2001 From: Rohan Sardar <77870108+RohanSardar@users.noreply.github.com> Date: Thu, 5 Oct 2023 14:22:40 +0530 Subject: [PATCH 022/306] Program to convert a given string to Pig Latin (#9712) * Program to convert a given string to Pig Latin This is a program to convert a user given string to its respective Pig Latin form As per wikipedia (link: https://en.wikipedia.org/wiki/Pig_Latin#Rules) For words that begin with consonant sounds, all letters before the initial vowel are placed at the end of the word sequence. Then, "ay" is added, as in the following examples: "pig" = "igpay" "latin" = "atinlay" "banana" = "ananabay" When words begin with consonant clusters (multiple consonants that form one sound), the whole sound is added to the end when speaking or writing. "friends" = "iendsfray" "smile" = "ilesmay" "string" = "ingstray" For words that begin with vowel sounds, one just adds "hay", "way" or "yay" to the end. 
Examples are: "eat" = "eatway" "omelet" = "omeletway" "are" = "areway" * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pig_latin.py Added f-string * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pig_latin.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pig_latin.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pig_latin.py * Update pig_latin.py * Update pig_latin.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- strings/pig_latin.py | 44 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 strings/pig_latin.py diff --git a/strings/pig_latin.py b/strings/pig_latin.py new file mode 100644 index 000000000..457dbb5a6 --- /dev/null +++ b/strings/pig_latin.py @@ -0,0 +1,44 @@ +def pig_latin(word: str) -> str: + """Compute the piglatin of a given string. 
+ + https://en.wikipedia.org/wiki/Pig_Latin + + Usage examples: + >>> pig_latin("pig") + 'igpay' + >>> pig_latin("latin") + 'atinlay' + >>> pig_latin("banana") + 'ananabay' + >>> pig_latin("friends") + 'iendsfray' + >>> pig_latin("smile") + 'ilesmay' + >>> pig_latin("string") + 'ingstray' + >>> pig_latin("eat") + 'eatway' + >>> pig_latin("omelet") + 'omeletway' + >>> pig_latin("are") + 'areway' + >>> pig_latin(" ") + '' + >>> pig_latin(None) + '' + """ + if not (word or "").strip(): + return "" + word = word.lower() + if word[0] in "aeiou": + return f"{word}way" + for i, char in enumerate(word): # noqa: B007 + if char in "aeiou": + break + return f"{word[i:]}{word[:i]}ay" + + +if __name__ == "__main__": + print(f"{pig_latin('friends') = }") + word = input("Enter a word: ") + print(f"{pig_latin(word) = }") From dffbe458c07d492b9c599376233f9f6295527339 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Fri, 6 Oct 2023 00:26:33 +1300 Subject: [PATCH 023/306] Update contributing guidelines to say not to open new issues for algorithms (#9760) * updated CONTRIBUTING.md with markdown anchors and issues * removed testing header from previous PR --- CONTRIBUTING.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7a67ce33c..bf3420185 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -25,8 +25,12 @@ We appreciate any contribution, from fixing a grammar mistake in a comment to im Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the GitHub Actions output to understand the failure. 
If you do not understand, please leave a comment on your submission page and a community member will try to help. +#### Issues + If you are interested in resolving an [open issue](https://github.com/TheAlgorithms/Python/issues), simply make a pull request with your proposed fix. __We do not assign issues in this repo__ so please do not ask for permission to work on an issue. +__Do not__ create an issue to contribute an algorithm. Please submit a pull request instead. + Please help us keep our issue list small by adding `Fixes #{$ISSUE_NUMBER}` to the description of pull requests that resolve open issues. For example, if your pull request fixes issue #10, then please add the following to its description: ``` From 0e3ea3fbab0297f38ed48b9e2f694cc43f8af567 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Thu, 5 Oct 2023 16:30:39 +0500 Subject: [PATCH 024/306] Fermat_little_theorem type annotation (#9794) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Added type annotation. * Update fermat_little_theorem.py Used other syntax. 
* Update fermat_little_theorem.py * Update maths/fermat_little_theorem.py --------- Co-authored-by: Tianyi Zheng --- maths/fermat_little_theorem.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/fermat_little_theorem.py b/maths/fermat_little_theorem.py index eea03be24..4a3ecd05c 100644 --- a/maths/fermat_little_theorem.py +++ b/maths/fermat_little_theorem.py @@ -5,7 +5,7 @@ # Wikipedia reference: https://en.wikipedia.org/wiki/Fermat%27s_little_theorem -def binary_exponentiation(a, n, mod): +def binary_exponentiation(a: int, n: float, mod: int) -> int: if n == 0: return 1 From 1b6c5cc2713743b8a74fd9c92e0a1b6442d63a7f Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Thu, 5 Oct 2023 17:30:43 +0500 Subject: [PATCH 025/306] Karatsuba type annotation (#9800) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Added type annotation. 
--- maths/karatsuba.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/karatsuba.py b/maths/karatsuba.py index 4bf4aecdc..3d29e31d2 100644 --- a/maths/karatsuba.py +++ b/maths/karatsuba.py @@ -1,7 +1,7 @@ """ Multiply two numbers using Karatsuba algorithm """ -def karatsuba(a, b): +def karatsuba(a: int, b: int) -> int: """ >>> karatsuba(15463, 23489) == 15463 * 23489 True From f159a3350650843e0b3e856e612cda56eabb4237 Mon Sep 17 00:00:00 2001 From: Abul Hasan <33129246+haxkd@users.noreply.github.com> Date: Thu, 5 Oct 2023 18:09:14 +0530 Subject: [PATCH 026/306] convert to the base minus 2 of a number (#9748) * Fix: Issue 9588 * Fix: Issue 9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: Issue 9588 * Fix: Issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: Issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: Issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: issue #9793 * fix: issue #9793 * fix: issue #9588 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/base_neg2_conversion.py | 37 +++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 maths/base_neg2_conversion.py diff --git a/maths/base_neg2_conversion.py b/maths/base_neg2_conversion.py new file mode 100644 index 000000000..81d40d37e --- /dev/null +++ b/maths/base_neg2_conversion.py @@ -0,0 +1,37 @@ +def decimal_to_negative_base_2(num: int) -> int: + """ + This function returns the number negative base 2 + of the decimal number of the input data. + + Args: + int: The decimal number to convert. 
+ + Returns: + int: The negative base 2 number. + + Examples: + >>> decimal_to_negative_base_2(0) + 0 + >>> decimal_to_negative_base_2(-19) + 111101 + >>> decimal_to_negative_base_2(4) + 100 + >>> decimal_to_negative_base_2(7) + 11011 + """ + if num == 0: + return 0 + ans = "" + while num != 0: + num, rem = divmod(num, -2) + if rem < 0: + rem += 2 + num += 1 + ans = str(rem) + ans + return int(ans) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 9bfc314e878e36a5f5d8974ec188ad7f0db8c5a1 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Thu, 5 Oct 2023 17:39:29 +0500 Subject: [PATCH 027/306] hardy_ramanujanalgo type annotation (#9799) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Added type annotation. --- maths/hardy_ramanujanalgo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/hardy_ramanujanalgo.py b/maths/hardy_ramanujanalgo.py index 6929533fc..31ec76fbe 100644 --- a/maths/hardy_ramanujanalgo.py +++ b/maths/hardy_ramanujanalgo.py @@ -4,7 +4,7 @@ import math -def exact_prime_factor_count(n): +def exact_prime_factor_count(n: int) -> int: """ >>> exact_prime_factor_count(51242183) 3 From 6643c955376174c307c982b1d5cc39778c40bea1 Mon Sep 17 00:00:00 2001 From: Adebisi Ahmed Date: Thu, 5 Oct 2023 14:18:54 +0100 Subject: [PATCH 028/306] add gas station (#9446) * feat: add gas station * make code more readable make code more readable * update test * Update gas_station.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tuple[GasStation, ...] 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- greedy_methods/gas_station.py | 97 +++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 greedy_methods/gas_station.py diff --git a/greedy_methods/gas_station.py b/greedy_methods/gas_station.py new file mode 100644 index 000000000..2427375d2 --- /dev/null +++ b/greedy_methods/gas_station.py @@ -0,0 +1,97 @@ +""" +Task: +There are n gas stations along a circular route, where the amount of gas +at the ith station is gas_quantities[i]. + +You have a car with an unlimited gas tank and it costs costs[i] of gas +to travel from the ith station to its next (i + 1)th station. +You begin the journey with an empty tank at one of the gas stations. + +Given two integer arrays gas_quantities and costs, return the starting +gas station's index if you can travel around the circuit once +in the clockwise direction otherwise, return -1. +If there exists a solution, it is guaranteed to be unique + +Reference: https://leetcode.com/problems/gas-station/description + +Implementation notes: +First, check whether the total gas is enough to complete the journey. If not, return -1. +However, if there is enough gas, it is guaranteed that there is a valid +starting index to reach the end of the journey. +Greedily calculate the net gain (gas_quantity - cost) at each station. +If the net gain ever goes below 0 while iterating through the stations, +start checking from the next station. + +""" +from dataclasses import dataclass + + +@dataclass +class GasStation: + gas_quantity: int + cost: int + + +def get_gas_stations( + gas_quantities: list[int], costs: list[int] +) -> tuple[GasStation, ...]: + """ + This function returns a tuple of gas stations. 
+ + Args: + gas_quantities: Amount of gas available at each station + costs: The cost of gas required to move from one station to the next + + Returns: + A tuple of gas stations + + >>> gas_stations = get_gas_stations([1, 2, 3, 4, 5], [3, 4, 5, 1, 2]) + >>> len(gas_stations) + 5 + >>> gas_stations[0] + GasStation(gas_quantity=1, cost=3) + >>> gas_stations[-1] + GasStation(gas_quantity=5, cost=2) + """ + return tuple( + GasStation(quantity, cost) for quantity, cost in zip(gas_quantities, costs) + ) + + +def can_complete_journey(gas_stations: tuple[GasStation, ...]) -> int: + """ + This function returns the index from which to start the journey + in order to reach the end. + + Args: + gas_quantities [list]: Amount of gas available at each station + cost [list]: The cost of gas required to move from one station to the next + + Returns: + start [int]: start index needed to complete the journey + + Examples: + >>> can_complete_journey(get_gas_stations([1, 2, 3, 4, 5], [3, 4, 5, 1, 2])) + 3 + >>> can_complete_journey(get_gas_stations([2, 3, 4], [3, 4, 3])) + -1 + """ + total_gas = sum(gas_station.gas_quantity for gas_station in gas_stations) + total_cost = sum(gas_station.cost for gas_station in gas_stations) + if total_gas < total_cost: + return -1 + + start = 0 + net = 0 + for i, gas_station in enumerate(gas_stations): + net += gas_station.gas_quantity - gas_station.cost + if net < 0: + start = i + 1 + net = 0 + return start + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 55ee273419ae76ddeda250374921644615b88393 Mon Sep 17 00:00:00 2001 From: Wei Jiang <42140605+Jiang15@users.noreply.github.com> Date: Thu, 5 Oct 2023 16:00:48 +0200 Subject: [PATCH 029/306] [bug fixing] Edge case of the double ended queue (#9823) * fix the edge case of the double ended queue pop the last element * refactoring doc --------- Co-authored-by: Jiang15 --- data_structures/queue/double_ended_queue.py | 62 +++++++++++++++------ 1 file changed, 45 insertions(+), 17 
deletions(-) diff --git a/data_structures/queue/double_ended_queue.py b/data_structures/queue/double_ended_queue.py index 44dc863b9..17a23038d 100644 --- a/data_structures/queue/double_ended_queue.py +++ b/data_structures/queue/double_ended_queue.py @@ -242,12 +242,20 @@ class Deque: Removes the last element of the deque and returns it. Time complexity: O(1) @returns topop.val: the value of the node to pop. - >>> our_deque = Deque([1, 2, 3, 15182]) - >>> our_popped = our_deque.pop() - >>> our_popped + >>> our_deque1 = Deque([1]) + >>> our_popped1 = our_deque1.pop() + >>> our_popped1 + 1 + >>> our_deque1 + [] + + >>> our_deque2 = Deque([1, 2, 3, 15182]) + >>> our_popped2 = our_deque2.pop() + >>> our_popped2 15182 - >>> our_deque + >>> our_deque2 [1, 2, 3] + >>> from collections import deque >>> deque_collections = deque([1, 2, 3, 15182]) >>> collections_popped = deque_collections.pop() @@ -255,18 +263,24 @@ class Deque: 15182 >>> deque_collections deque([1, 2, 3]) - >>> list(our_deque) == list(deque_collections) + >>> list(our_deque2) == list(deque_collections) True - >>> our_popped == collections_popped + >>> our_popped2 == collections_popped True """ # make sure the deque has elements to pop assert not self.is_empty(), "Deque is empty." topop = self._back - self._back = self._back.prev_node # set new back - # drop the last node - python will deallocate memory automatically - self._back.next_node = None + # if only one element in the queue: point the front and back to None + # else remove one element from back + if self._front == self._back: + self._front = None + self._back = None + else: + self._back = self._back.prev_node # set new back + # drop the last node, python will deallocate memory automatically + self._back.next_node = None self._len -= 1 @@ -277,11 +291,17 @@ class Deque: Removes the first element of the deque and returns it. Time complexity: O(1) @returns topop.val: the value of the node to pop. 
- >>> our_deque = Deque([15182, 1, 2, 3]) - >>> our_popped = our_deque.popleft() - >>> our_popped + >>> our_deque1 = Deque([1]) + >>> our_popped1 = our_deque1.pop() + >>> our_popped1 + 1 + >>> our_deque1 + [] + >>> our_deque2 = Deque([15182, 1, 2, 3]) + >>> our_popped2 = our_deque2.popleft() + >>> our_popped2 15182 - >>> our_deque + >>> our_deque2 [1, 2, 3] >>> from collections import deque >>> deque_collections = deque([15182, 1, 2, 3]) @@ -290,17 +310,23 @@ class Deque: 15182 >>> deque_collections deque([1, 2, 3]) - >>> list(our_deque) == list(deque_collections) + >>> list(our_deque2) == list(deque_collections) True - >>> our_popped == collections_popped + >>> our_popped2 == collections_popped True """ # make sure the deque has elements to pop assert not self.is_empty(), "Deque is empty." topop = self._front - self._front = self._front.next_node # set new front and drop the first node - self._front.prev_node = None + # if only one element in the queue: point the front and back to None + # else remove one element from front + if self._front == self._back: + self._front = None + self._back = None + else: + self._front = self._front.next_node # set new front and drop the first node + self._front.prev_node = None self._len -= 1 @@ -432,3 +458,5 @@ if __name__ == "__main__": import doctest doctest.testmod() + dq = Deque([3]) + dq.pop() From deb0480b3a07e50b93f88d4351d1fce000574d05 Mon Sep 17 00:00:00 2001 From: Aasheesh <126905285+AasheeshLikePanner@users.noreply.github.com> Date: Thu, 5 Oct 2023 19:37:44 +0530 Subject: [PATCH 030/306] Changing the directory of sigmoid_linear_unit.py (#9824) * Changing the directory of sigmoid_linear_unit.py * Delete neural_network/activation_functions/__init__.py --------- Co-authored-by: Tianyi Zheng --- .../activation_functions}/sigmoid_linear_unit.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {maths => neural_network/activation_functions}/sigmoid_linear_unit.py (100%) diff --git a/maths/sigmoid_linear_unit.py 
b/neural_network/activation_functions/sigmoid_linear_unit.py similarity index 100% rename from maths/sigmoid_linear_unit.py rename to neural_network/activation_functions/sigmoid_linear_unit.py From 87494f1fa1022368d154477bdc035fd01f9e4382 Mon Sep 17 00:00:00 2001 From: Parth <100679824+pa-kh039@users.noreply.github.com> Date: Thu, 5 Oct 2023 21:51:28 +0530 Subject: [PATCH 031/306] largest divisible subset (#9825) * largest divisible subset * minor tweaks * adding more test cases Co-authored-by: Christian Clauss * improving code for better readability Co-authored-by: Christian Clauss * update Co-authored-by: Christian Clauss * update Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * suggested changes done, and further modfications * final update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update largest_divisible_subset.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update largest_divisible_subset.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../largest_divisible_subset.py | 74 +++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 dynamic_programming/largest_divisible_subset.py diff --git a/dynamic_programming/largest_divisible_subset.py b/dynamic_programming/largest_divisible_subset.py new file mode 100644 index 000000000..db38636e2 --- /dev/null +++ b/dynamic_programming/largest_divisible_subset.py @@ -0,0 +1,74 @@ +from __future__ import annotations + + +def largest_divisible_subset(items: list[int]) -> list[int]: + """ + Algorithm to find the biggest subset in the given array such that for any 2 elements + x and y in the subset, either x divides y or y divides x. 
+ >>> largest_divisible_subset([1, 16, 7, 8, 4]) + [16, 8, 4, 1] + >>> largest_divisible_subset([1, 2, 3]) + [2, 1] + >>> largest_divisible_subset([-1, -2, -3]) + [-3] + >>> largest_divisible_subset([1, 2, 4, 8]) + [8, 4, 2, 1] + >>> largest_divisible_subset((1, 2, 4, 8)) + [8, 4, 2, 1] + >>> largest_divisible_subset([1, 1, 1]) + [1, 1, 1] + >>> largest_divisible_subset([0, 0, 0]) + [0, 0, 0] + >>> largest_divisible_subset([-1, -1, -1]) + [-1, -1, -1] + >>> largest_divisible_subset([]) + [] + """ + # Sort the array in ascending order as the sequence does not matter we only have to + # pick up a subset. + items = sorted(items) + + number_of_items = len(items) + + # Initialize memo with 1s and hash with increasing numbers + memo = [1] * number_of_items + hash_array = list(range(number_of_items)) + + # Iterate through the array + for i, item in enumerate(items): + for prev_index in range(i): + if ((items[prev_index] != 0 and item % items[prev_index]) == 0) and ( + (1 + memo[prev_index]) > memo[i] + ): + memo[i] = 1 + memo[prev_index] + hash_array[i] = prev_index + + ans = -1 + last_index = -1 + + # Find the maximum length and its corresponding index + for i, memo_item in enumerate(memo): + if memo_item > ans: + ans = memo_item + last_index = i + + # Reconstruct the divisible subset + if last_index == -1: + return [] + result = [items[last_index]] + while hash_array[last_index] != last_index: + last_index = hash_array[last_index] + result.append(items[last_index]) + + return result + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + + items = [1, 16, 7, 8, 4] + print( + f"The longest divisible subset of {items} is {largest_divisible_subset(items)}." 
+ ) From b76115e8d184fbad1d6c400fcdd964e821f09e9b Mon Sep 17 00:00:00 2001 From: Pronay Debnath Date: Thu, 5 Oct 2023 23:03:05 +0530 Subject: [PATCH 032/306] Updated check_bipartite_graph_dfs.py (#9525) * Create dijkstra_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Delete greedy_methods/dijkstra_algorithm.py * Update check_bipartite_graph_dfs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update check_bipartite_graph_dfs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update graphs/check_bipartite_graph_dfs.py Co-authored-by: Christian Clauss * Update graphs/check_bipartite_graph_dfs.py Co-authored-by: Christian Clauss * Update check_bipartite_graph_dfs.py * Update check_bipartite_graph_dfs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update check_bipartite_graph_dfs.py * Update check_bipartite_graph_dfs.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update check_bipartite_graph_dfs.py * Update check_bipartite_graph_dfs.py * Update check_bipartite_graph_dfs.py * Let's use self-documenting variable names This is complex code so let's use self-documenting function and variable names to help readers to understand. We should not shorten names to simplify the code formatting but use understandable name and leave to code formatting to psf/black. 
def is_bipartite(graph: defaultdict[int, list[int]]) -> bool:
    """
    Check whether a graph is bipartite using depth-first search (DFS).

    A bipartite graph is a graph whose vertices can be divided into two
    independent sets, U and V, such that every edge (u, v) connects a vertex
    from U to one from V.  Equivalently, the graph is 2-colorable: no edge
    may join two vertices of the same color.

    Args:
        graph: An adjacency list representing the graph.

    Returns:
        True if the graph is bipartite, False otherwise.

    Examples:
    >>> is_bipartite(
    ...     defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4], 3: [1], 4: [2]})
    ... )
    True
    >>> is_bipartite(defaultdict(list, {0: [1, 2], 1: [0, 2], 2: [0, 1]}))
    False
    """

    def depth_first_search(node: int, color: int) -> bool:
        # Color the node, then check that every neighbour can take the
        # opposite color.  Returns True iff no same-color edge is found.
        if visited[node] == -1:
            visited[node] = color
            return all(
                depth_first_search(neighbour, 1 - color) for neighbour in graph[node]
            )
        return visited[node] == color

    # -1 marks "not yet visited"; otherwise the value is the node's color (0/1).
    visited: defaultdict[int, int] = defaultdict(lambda: -1)

    # Iterate over a snapshot of the keys: graph is a defaultdict, so reading
    # graph[neighbour] inside the DFS may insert keys while we iterate.
    return all(
        visited[node] != -1 or depth_first_search(node, 0) for node in list(graph)
    )
--- maths/prime_numbers.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/maths/prime_numbers.py b/maths/prime_numbers.py index c5297ed92..38cc66703 100644 --- a/maths/prime_numbers.py +++ b/maths/prime_numbers.py @@ -17,8 +17,8 @@ def slow_primes(max_n: int) -> Generator[int, None, None]: [2, 3, 5, 7, 11] >>> list(slow_primes(33)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31] - >>> list(slow_primes(10000))[-1] - 9973 + >>> list(slow_primes(1000))[-1] + 997 """ numbers: Generator = (i for i in range(1, (max_n + 1))) for i in (n for n in numbers if n > 1): @@ -44,8 +44,8 @@ def primes(max_n: int) -> Generator[int, None, None]: [2, 3, 5, 7, 11] >>> list(primes(33)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31] - >>> list(primes(10000))[-1] - 9973 + >>> list(primes(1000))[-1] + 997 """ numbers: Generator = (i for i in range(1, (max_n + 1))) for i in (n for n in numbers if n > 1): @@ -73,8 +73,8 @@ def fast_primes(max_n: int) -> Generator[int, None, None]: [2, 3, 5, 7, 11] >>> list(fast_primes(33)) [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31] - >>> list(fast_primes(10000))[-1] - 9973 + >>> list(fast_primes(1000))[-1] + 997 """ numbers: Generator = (i for i in range(1, (max_n + 1), 2)) # It's useless to test even numbers as they will not be prime From 5869fda74245b55a3bda4ccc5ac62a84ab40766f Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 5 Oct 2023 23:55:13 +0200 Subject: [PATCH 034/306] print reverse: A LinkedList with a tail pointer (#9875) * print reverse: A LinkedList with a tail pointer * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 7 +- data_structures/linked_list/print_reverse.py | 140 +++++++++++++------ 2 files changed, 104 insertions(+), 43 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index b0ba3c385..c199a4329 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -50,6 +50,7 @@ * [Index Of Rightmost Set 
Bit](bit_manipulation/index_of_rightmost_set_bit.py) * [Is Even](bit_manipulation/is_even.py) * [Is Power Of Two](bit_manipulation/is_power_of_two.py) + * [Largest Pow Of Two Le Num](bit_manipulation/largest_pow_of_two_le_num.py) * [Missing Number](bit_manipulation/missing_number.py) * [Numbers Different Signs](bit_manipulation/numbers_different_signs.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) @@ -322,6 +323,7 @@ * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) * [Knapsack](dynamic_programming/knapsack.py) + * [Largest Divisible Subset](dynamic_programming/largest_divisible_subset.py) * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) * [Longest Common Substring](dynamic_programming/longest_common_substring.py) * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) @@ -460,6 +462,7 @@ ## Greedy Methods * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) + * [Gas Station](greedy_methods/gas_station.py) * [Minimum Waiting Time](greedy_methods/minimum_waiting_time.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) @@ -542,6 +545,7 @@ * [Average Median](maths/average_median.py) * [Average Mode](maths/average_mode.py) * [Bailey Borwein Plouffe](maths/bailey_borwein_plouffe.py) + * [Base Neg2 Conversion](maths/base_neg2_conversion.py) * [Basic Maths](maths/basic_maths.py) * [Bell Numbers](maths/bell_numbers.py) * [Binary Exp Mod](maths/binary_exp_mod.py) @@ -657,7 +661,6 @@ * [P Series](maths/series/p_series.py) * [Sieve Of Eratosthenes](maths/sieve_of_eratosthenes.py) * [Sigmoid](maths/sigmoid.py) - * [Sigmoid Linear Unit](maths/sigmoid_linear_unit.py) * [Signum](maths/signum.py) * [Simpson Rule](maths/simpson_rule.py) * [Simultaneous Linear Equation 
@dataclass
class Node:
    """A single element of a singly linked list."""

    data: int
    next_node: Node | None = None


class LinkedList:
    """A singly linked list that keeps a tail pointer for O(1) appends.

    >>> linked_list = LinkedList()
    >>> (linked_list.head, linked_list.tail)
    (None, None)
    >>> linked_list.extend([1, 2, 3])
    >>> str(linked_list)
    '1 -> 2 -> 3'
    """

    def __init__(self) -> None:
        # Both pointers start empty; tail tracks the last node so that
        # append() never has to walk the list.
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[int]:
        """Yield each node's data from head to tail.

        >>> tuple(LinkedList()) == ()
        True
        """
        current = self.head
        while current is not None:
            yield current.data
            current = current.next_node

    def __repr__(self) -> str:
        """Join the node values with ' -> '.

        >>> repr(LinkedList())
        ''
        """
        return " -> ".join(str(item) for item in self)

    def append(self, data: int) -> None:
        """Add a new node holding *data* after the current tail.

        >>> linked_list = LinkedList()
        >>> linked_list.append(1)
        >>> linked_list.append(2)
        >>> str(linked_list)
        '1 -> 2'
        """
        new_node = Node(data)
        if self.tail is None:
            self.head = new_node
        else:
            self.tail.next_node = new_node
        self.tail = new_node

    def extend(self, items: Iterable[int]) -> None:
        """Append every item of *items* in order.

        >>> linked_list = LinkedList()
        >>> linked_list.extend(range(3))
        >>> str(linked_list)
        '0 -> 1 -> 2'
        """
        for item in items:
            self.append(item)
def make_linked_list(elements_list: Iterable[int]) -> LinkedList:
    """Create a LinkedList from the items of the given sequence and return it.

    >>> make_linked_list([])
    Traceback (most recent call last):
        ...
    Exception: The Elements List is empty
    >>> make_linked_list([7])
    7
    >>> make_linked_list(['abc'])
    abc
    >>> make_linked_list([7, 25])
    7 -> 25
    """
    if not elements_list:
        # An empty sequence cannot seed a list; keep the historical error.
        raise Exception("The Elements List is empty")
    new_list = LinkedList()
    new_list.extend(elements_list)
    return new_list


def in_reverse(linked_list: LinkedList) -> str:
    """Return the elements of the given LinkedList, last to first.

    >>> in_reverse(LinkedList())
    ''
    >>> in_reverse(make_linked_list([69, 88, 73]))
    '73 <- 88 <- 69'
    """
    items = [str(item) for item in linked_list]
    return " <- ".join(reversed(items))
def camel_to_snake_case(input_str: str) -> str:
    """
    Transform a camelCase (or PascalCase) string to snake_case.

    >>> camel_to_snake_case("someRandomString")
    'some_random_string'

    >>> camel_to_snake_case("SomeRandomStr#ng")
    'some_random_str_ng'

    >>> camel_to_snake_case("123someRandom123String123")
    '123_some_random_123_string_123'

    >>> camel_to_snake_case("123SomeRandom123String123")
    '123_some_random_123_string_123'

    >>> camel_to_snake_case("")
    ''

    >>> camel_to_snake_case(123)
    Traceback (most recent call last):
    ...
    ValueError: Expected string as input, found <class 'int'>
    """

    # check for invalid input type
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)

    snake_str = ""

    for index, char in enumerate(input_str):
        if char.isupper():
            snake_str += "_" + char.lower()

        # if char is lowercase but preceded by a digit
        # (index > 0 guard: input_str[-1] would wrap to the LAST character)
        elif index > 0 and input_str[index - 1].isdigit() and char.islower():
            snake_str += "_" + char

        # if char is a digit preceded by a letter:
        elif index > 0 and input_str[index - 1].isalpha() and char.isnumeric():
            snake_str += "_" + char

        # if char is not alphanumeric:
        elif not char.isalnum():
            snake_str += "_"

        else:
            snake_str += char

    # Drop the separator an initial uppercase letter introduces; unlike
    # snake_str[0], removeprefix() is safe on the empty string.
    return snake_str.removeprefix("_")
electronics/resistor_color_code.py | 373 +++++++++++++++++++++++++++++ 1 file changed, 373 insertions(+) create mode 100644 electronics/resistor_color_code.py diff --git a/electronics/resistor_color_code.py b/electronics/resistor_color_code.py new file mode 100644 index 000000000..b0534b813 --- /dev/null +++ b/electronics/resistor_color_code.py @@ -0,0 +1,373 @@ +""" +Title : Calculating the resistance of a n band resistor using the color codes + +Description : + Resistors resist the flow of electrical current.Each one has a value that tells how + strongly it resists current flow.This value's unit is the ohm, often noted with the + Greek letter omega: Ω. + + The colored bands on a resistor can tell you everything you need to know about its + value and tolerance, as long as you understand how to read them. The order in which + the colors are arranged is very important, and each value of resistor has its own + unique combination. + + The color coding for resistors is an international standard that is defined in IEC + 60062. + + The number of bands present in a resistor varies from three to six. These represent + significant figures, multiplier, tolerance, reliability, and temperature coefficient + Each color used for a type of band has a value assigned to it. It is read from left + to right. + All resistors will have significant figures and multiplier bands. In a three band + resistor first two bands from the left represent significant figures and the third + represents the multiplier band. + + Significant figures - The number of significant figures band in a resistor can vary + from two to three. + Colors and values associated with significant figure bands - + (Black = 0, Brown = 1, Red = 2, Orange = 3, Yellow = 4, Green = 5, Blue = 6, + Violet = 7, Grey = 8, White = 9) + + Multiplier - There will be one multiplier band in a resistor. It is multiplied with + the significant figures obtained from previous bands. 
+ Colors and values associated with multiplier band - + (Black = 100, Brown = 10^1, Red = 10^2, Orange = 10^3, Yellow = 10^4, Green = 10^5, + Blue = 10^6, Violet = 10^7, Grey = 10^8, White = 10^9, Gold = 10^-1, Silver = 10^-2) + Note that multiplier bands use Gold and Silver which are not used for significant + figure bands. + + Tolerance - The tolerance band is not always present. It can be seen in four band + resistors and above. This is a percentage by which the resistor value can vary. + Colors and values associated with tolerance band - + (Brown = 1%, Red = 2%, Orange = 0.05%, Yellow = 0.02%, Green = 0.5%,Blue = 0.25%, + Violet = 0.1%, Grey = 0.01%, Gold = 5%, Silver = 10%) + If no color is mentioned then by default tolerance is 20% + Note that tolerance band does not use Black and White colors. + + Temperature Coeffecient - Indicates the change in resistance of the component as + a function of ambient temperature in terms of ppm/K. + It is present in six band resistors. + Colors and values associated with Temperature coeffecient - + (Black = 250 ppm/K, Brown = 100 ppm/K, Red = 50 ppm/K, Orange = 15 ppm/K, + Yellow = 25 ppm/K, Green = 20 ppm/K, Blue = 10 ppm/K, Violet = 5 ppm/K, + Grey = 1 ppm/K) + Note that temperature coeffecient band does not use White, Gold, Silver colors. 
# Sources:
#   https://www.calculator.net/resistor-calculator.html
#   https://learn.parallax.com/support/reference/resistor-color-codes
#   https://byjus.com/physics/resistor-colour-codes/
valid_colors: list = [
    "Black",
    "Brown",
    "Red",
    "Orange",
    "Yellow",
    "Green",
    "Blue",
    "Violet",
    "Grey",
    "White",
    "Gold",
    "Silver",
]

# Digit values 0-9 map onto the first ten colors, in order.
significant_figures_color_values: dict[str, int] = dict(
    zip(valid_colors[:10], range(10))
)

# Multipliers run 10**0 .. 10**9 over the same ten colors; Gold and Silver
# scale down instead of up.
multiplier_color_values: dict[str, float] = {
    color: 10**exponent for exponent, color in enumerate(valid_colors[:10])
}
multiplier_color_values["Gold"] = 10**-1
multiplier_color_values["Silver"] = 10**-2

# Tolerance percentages; Black and White are not used for this band.
tolerance_color_values: dict[str, float] = {
    "Brown": 1,
    "Red": 2,
    "Orange": 0.05,
    "Yellow": 0.02,
    "Green": 0.5,
    "Blue": 0.25,
    "Violet": 0.1,
    "Grey": 0.01,
    "Gold": 5,
    "Silver": 10,
}

# Temperature coefficients in ppm/K; only present on six-band resistors.
temperature_coeffecient_color_values: dict[str, int] = {
    "Black": 250,
    "Brown": 100,
    "Red": 50,
    "Orange": 15,
    "Yellow": 25,
    "Green": 20,
    "Blue": 10,
    "Violet": 5,
    "Grey": 1,
}

# How many bands of each kind an n-band resistor has.
band_types: dict[int, dict[str, int]] = {
    3: {"significant": 2, "multiplier": 1},
    4: {"significant": 2, "multiplier": 1, "tolerance": 1},
    5: {"significant": 3, "multiplier": 1, "tolerance": 1},
    6: {"significant": 3, "multiplier": 1, "tolerance": 1, "temp_coeffecient": 1},
}


def get_significant_digits(colors: list) -> str:
    """Return the digits encoded by the significant-figure bands as one string.

    >>> get_significant_digits(['Black','Blue'])
    '06'

    >>> get_significant_digits(['Aqua','Blue'])
    Traceback (most recent call last):
    ...
    ValueError: Aqua is not a valid color for significant figure bands

    """
    digits = []
    for color in colors:
        if color not in significant_figures_color_values:
            msg = f"{color} is not a valid color for significant figure bands"
            raise ValueError(msg)
        digits.append(str(significant_figures_color_values[color]))
    return "".join(digits)


def get_multiplier(color: str) -> float:
    """Return the multiplier value associated with the given band color.

    >>> get_multiplier('Gold')
    0.1

    >>> get_multiplier('Ivory')
    Traceback (most recent call last):
    ...
    ValueError: Ivory is not a valid color for multiplier band

    """
    if color in multiplier_color_values:
        return multiplier_color_values[color]
    msg = f"{color} is not a valid color for multiplier band"
    raise ValueError(msg)


def get_tolerance(color: str) -> float:
    """Return the tolerance percentage associated with the given band color.

    >>> get_tolerance('Green')
    0.5

    >>> get_tolerance('Indigo')
    Traceback (most recent call last):
    ...
    ValueError: Indigo is not a valid color for tolerance band

    """
    if color in tolerance_color_values:
        return tolerance_color_values[color]
    msg = f"{color} is not a valid color for tolerance band"
    raise ValueError(msg)
def get_band_type_count(total_number_of_bands: int, type_of_band: str) -> int:
    """Return how many bands of *type_of_band* an n-band resistor has.

    >>> get_band_type_count(3,'significant')
    2

    >>> get_band_type_count(2,'significant')
    Traceback (most recent call last):
    ...
    ValueError: 2 is not a valid number of bands

    >>> get_band_type_count(3,'sign')
    Traceback (most recent call last):
    ...
    ValueError: sign is not valid for a 3 band resistor

    >>> get_band_type_count(3,'tolerance')
    Traceback (most recent call last):
    ...
    ValueError: tolerance is not valid for a 3 band resistor

    >>> get_band_type_count(5,'temp_coeffecient')
    Traceback (most recent call last):
    ...
    ValueError: temp_coeffecient is not valid for a 5 band resistor

    """
    if total_number_of_bands not in band_types:
        raise ValueError(f"{total_number_of_bands} is not a valid number of bands")
    counts_by_type = band_types[total_number_of_bands]
    if type_of_band not in counts_by_type:
        raise ValueError(
            f"{type_of_band} is not valid for a {total_number_of_bands} band resistor"
        )
    return counts_by_type[type_of_band]
def calculate_resistance(number_of_bands: int, color_code_list: list) -> dict:
    """Compute the resistance described by a resistor's color bands.

    Args:
        number_of_bands: Total number of bands on the resistor (3 to 6).
        color_code_list: Band colors, listed from left to right.

    Returns:
        A dict with a single "resistance" key whose value is a human-readable
        string: resistance in ohms, tolerance percentage and, for six-band
        resistors, the temperature coefficient in ppm/K.

    Raises:
        ValueError: if the band count or any color is invalid.

    >>> calculate_resistance(3, ["Black","Blue","Orange"])
    {'resistance': '6000Ω ±20% '}

    >>> calculate_resistance(4, ["Orange","Green","Blue","Gold"])
    {'resistance': '35000000Ω ±5% '}

    >>> calculate_resistance(5, ["Violet","Brown","Grey","Silver","Green"])
    {'resistance': '7.18Ω ±0.5% '}

    >>> calculate_resistance(6, ["Red","Green","Blue","Yellow","Orange","Grey"])
    {'resistance': '2560000Ω ±0.05% 1 ppm/K'}

    >>> calculate_resistance(0, ["Violet","Brown","Grey","Silver","Green"])
    Traceback (most recent call last):
    ...
    ValueError: Invalid number of bands. Resistor bands must be 3 to 6

    >>> calculate_resistance(4, ["Violet","Brown","Grey","Silver","Green"])
    Traceback (most recent call last):
    ...
    ValueError: Expecting 4 colors, provided 5 colors
    """
    # check_validity() raises on any bad input and otherwise returns True, so
    # no return-value check is needed (the old "else: raise ValueError(
    # 'Input is invalid')" branch was unreachable).
    check_validity(number_of_bands, color_code_list)

    significant_count = get_band_type_count(number_of_bands, "significant")
    significant_digits = int(
        get_significant_digits(color_code_list[:significant_count])
    )
    multiplier = get_multiplier(color_code_list[significant_count])

    # The tolerance band exists only on 4+ band resistors; default is 20%.
    if number_of_bands == 3:
        tolerance: float = 20
    else:
        tolerance = get_tolerance(str(color_code_list[significant_count + 1]))

    # The temperature-coefficient band exists only on 6-band resistors.
    temperature_coeffecient = (
        get_temperature_coeffecient(str(color_code_list[significant_count + 2]))
        if number_of_bands == 6
        else 0
    )

    resistance = significant_digits * multiplier
    if temperature_coeffecient == 0:
        answer = f"{resistance}Ω ±{tolerance}% "
    else:
        answer = f"{resistance}Ω ±{tolerance}% {temperature_coeffecient} ppm/K"
    return {"resistance": answer}
def match_word_pattern(pattern: str, input_string: str) -> bool:
    """
    Determine if a given pattern matches a string using backtracking.

    Each pattern character must map to a unique, non-empty substring of
    input_string, and the concatenation of the mapped substrings (in pattern
    order) must reproduce input_string exactly.

    pattern: The pattern to match.
    input_string: The string to match against the pattern.
    return: True if the pattern matches the string, False otherwise.

    >>> match_word_pattern("aba", "GraphTreesGraph")
    True

    >>> match_word_pattern("xyx", "PythonRubyPython")
    True

    >>> match_word_pattern("GG", "PythonJavaPython")
    False
    """

    # NOTE: the original nested docstring carried doctests for backtrack();
    # doctest never collects nested-function docstrings, so they were dead
    # (and unrunnable) — documented as comments instead.
    def backtrack(pattern_index: int, str_index: int) -> bool:
        # Match pattern[pattern_index:] against input_string[str_index:],
        # extending the two bijection maps as needed.
        if pattern_index == len(pattern) and str_index == len(input_string):
            return True
        if pattern_index == len(pattern) or str_index == len(input_string):
            return False
        char = pattern[pattern_index]
        if char in pattern_map:
            # Already bound: the bound substring must occur right here.
            mapped_str = pattern_map[char]
            if input_string.startswith(mapped_str, str_index):
                return backtrack(pattern_index + 1, str_index + len(mapped_str))
            return False
        for end in range(str_index + 1, len(input_string) + 1):
            substr = input_string[str_index:end]
            if substr in str_map:
                # Keep the mapping a bijection: this substring is taken.
                continue
            pattern_map[char] = substr
            str_map[substr] = char
            if backtrack(pattern_index + 1, end):
                return True
            # Undo the tentative binding before trying a longer substring.
            del pattern_map[char]
            del str_map[substr]
        return False

    pattern_map: dict[str, str] = {}
    str_map: dict[str, str] = {}
    return backtrack(0, 0)
000000000..55678b4c0 --- /dev/null +++ b/graphs/deep_clone_graph.py @@ -0,0 +1,77 @@ +""" +LeetCode 133. Clone Graph +https://leetcode.com/problems/clone-graph/ + +Given a reference of a node in a connected undirected graph. + +Return a deep copy (clone) of the graph. + +Each node in the graph contains a value (int) and a list (List[Node]) of its +neighbors. +""" +from dataclasses import dataclass + + +@dataclass +class Node: + value: int = 0 + neighbors: list["Node"] | None = None + + def __post_init__(self) -> None: + """ + >>> Node(3).neighbors + [] + """ + self.neighbors = self.neighbors or [] + + def __hash__(self) -> int: + """ + >>> hash(Node(3)) != 0 + True + """ + return id(self) + + +def clone_graph(node: Node | None) -> Node | None: + """ + This function returns a clone of a connected undirected graph. + >>> clone_graph(Node(1)) + Node(value=1, neighbors=[]) + >>> clone_graph(Node(1, [Node(2)])) + Node(value=1, neighbors=[Node(value=2, neighbors=[])]) + >>> clone_graph(None) is None + True + """ + if not node: + return None + + originals_to_clones = {} # map nodes to clones + + stack = [node] + + while stack: + original = stack.pop() + + if original in originals_to_clones: + continue + + originals_to_clones[original] = Node(original.value) + + stack.extend(original.neighbors or []) + + for original, clone in originals_to_clones.items(): + for neighbor in original.neighbors or []: + cloned_neighbor = originals_to_clones[neighbor] + + if not clone.neighbors: + clone.neighbors = [] + + clone.neighbors.append(cloned_neighbor) + + return originals_to_clones[node] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 9200c64464492117bff792f1f43b19050070af4a Mon Sep 17 00:00:00 2001 From: Aroson <74296409+Aroson1@users.noreply.github.com> Date: Fri, 6 Oct 2023 04:46:51 +0530 Subject: [PATCH 039/306] Added Wheatstone Bridge Algorithm (#9872) * Add files via upload * Update wheatstone_bridge.py * Update wheatstone_bridge.py --- 
def wheatstone_solver(
    resistance_1: float, resistance_2: float, resistance_3: float
) -> float:
    """
    Calculate the unknown resistance in a Wheatstone bridge network, given
    the three known resistances.  At balance the unknown arm satisfies:

        Rx = (R2 / R1) * R3

    Usage examples:
    >>> wheatstone_solver(resistance_1=2, resistance_2=4, resistance_3=5)
    10.0
    >>> wheatstone_solver(resistance_1=356, resistance_2=234, resistance_3=976)
    641.5280898876405
    >>> wheatstone_solver(resistance_1=2, resistance_2=-1, resistance_3=2)
    Traceback (most recent call last):
    ...
    ValueError: All resistance values must be positive
    >>> wheatstone_solver(resistance_1=0, resistance_2=0, resistance_3=2)
    Traceback (most recent call last):
    ...
    ValueError: All resistance values must be positive
    """
    # Guard clause: a bridge only makes physical sense with positive arms.
    if resistance_1 <= 0 or resistance_2 <= 0 or resistance_3 <= 0:
        raise ValueError("All resistance values must be positive")
    return float((resistance_2 / resistance_1) * resistance_3)
for more information, see https://pre-commit.ci * update laplacian_filter.py * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * changed laplacian_filter.py * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update laplacian_filter.py * Add a test --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../filters/laplacian_filter.py | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 digital_image_processing/filters/laplacian_filter.py diff --git a/digital_image_processing/filters/laplacian_filter.py b/digital_image_processing/filters/laplacian_filter.py new file mode 100644 index 000000000..69b9616e4 --- /dev/null +++ b/digital_image_processing/filters/laplacian_filter.py @@ -0,0 +1,81 @@ +# @Author : ojas-wani +# @File : laplacian_filter.py +# @Date : 10/04/2023 + +import numpy as np +from cv2 import ( + BORDER_DEFAULT, + COLOR_BGR2GRAY, + CV_64F, + cvtColor, + filter2D, + imread, + imshow, + waitKey, +) + +from digital_image_processing.filters.gaussian_filter import gaussian_filter + + +def my_laplacian(src: np.ndarray, ksize: int) -> np.ndarray: + """ + :param src: the source image, which should be a grayscale or color image. + :param ksize: the size of the kernel used to compute the Laplacian filter, + which can be 1, 3, 5, or 7. + + >>> my_laplacian(src=np.array([]), ksize=0) + Traceback (most recent call last): + ... 
+ ValueError: ksize must be in (1, 3, 5, 7) + """ + kernels = { + 1: np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]]), + 3: np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]), + 5: np.array( + [ + [0, 0, -1, 0, 0], + [0, -1, -2, -1, 0], + [-1, -2, 16, -2, -1], + [0, -1, -2, -1, 0], + [0, 0, -1, 0, 0], + ] + ), + 7: np.array( + [ + [0, 0, 0, -1, 0, 0, 0], + [0, 0, -2, -3, -2, 0, 0], + [0, -2, -7, -10, -7, -2, 0], + [-1, -3, -10, 68, -10, -3, -1], + [0, -2, -7, -10, -7, -2, 0], + [0, 0, -2, -3, -2, 0, 0], + [0, 0, 0, -1, 0, 0, 0], + ] + ), + } + if ksize not in kernels: + msg = f"ksize must be in {tuple(kernels)}" + raise ValueError(msg) + + # Apply the Laplacian kernel using convolution + return filter2D( + src, CV_64F, kernels[ksize], 0, borderType=BORDER_DEFAULT, anchor=(0, 0) + ) + + +if __name__ == "__main__": + # read original image + img = imread(r"../image_data/lena.jpg") + + # turn image in gray scale value + gray = cvtColor(img, COLOR_BGR2GRAY) + + # Applying gaussian filter + blur_image = gaussian_filter(gray, 3, sigma=1) + + # Apply multiple Kernel to detect edges + laplacian_image = my_laplacian(ksize=3, src=blur_image) + + imshow("Original image", img) + imshow("Detected edges using laplacian filter", laplacian_image) + + waitKey(0) From 17af6444497a64dbe803904e2ef27d0e2a280f8c Mon Sep 17 00:00:00 2001 From: JeevaRamanathan <64531160+JeevaRamanathan@users.noreply.github.com> Date: Fri, 6 Oct 2023 05:30:58 +0530 Subject: [PATCH 041/306] Symmetric tree (#9871) * symmectric tree * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed trailing spaces * escape sequence fix * added return type * added class * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * wordings fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added static method * added type * added static method * [pre-commit.ci] auto fixes 
from pre-commit.com hooks for more information, see https://pre-commit.ci * wordings fix * testcase added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * testcase added for mirror function * testcase added for mirror function * made the requested changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * made the requested changes * doc test added for symmetric, asymmetric * Update symmetric_tree.py --------- Co-authored-by: jeevaramanthan.m Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/binary_tree/symmetric_tree.py | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 data_structures/binary_tree/symmetric_tree.py diff --git a/data_structures/binary_tree/symmetric_tree.py b/data_structures/binary_tree/symmetric_tree.py new file mode 100644 index 000000000..331a25849 --- /dev/null +++ b/data_structures/binary_tree/symmetric_tree.py @@ -0,0 +1,101 @@ +""" +Given the root of a binary tree, check whether it is a mirror of itself +(i.e., symmetric around its center). + +Leetcode reference: https://leetcode.com/problems/symmetric-tree/ +""" +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass +class Node: + """ + A Node has data variable and pointers to Nodes to its left and right. + """ + + data: int + left: Node | None = None + right: Node | None = None + + +def make_symmetric_tree() -> Node: + r""" + Create a symmetric tree for testing. + The tree looks like this: + 1 + / \ + 2 2 + / \ / \ + 3 4 4 3 + """ + root = Node(1) + root.left = Node(2) + root.right = Node(2) + root.left.left = Node(3) + root.left.right = Node(4) + root.right.left = Node(4) + root.right.right = Node(3) + return root + + +def make_asymmetric_tree() -> Node: + r""" + Create a asymmetric tree for testing. 
+ The tree looks like this: + 1 + / \ + 2 2 + / \ / \ + 3 4 3 4 + """ + root = Node(1) + root.left = Node(2) + root.right = Node(2) + root.left.left = Node(3) + root.left.right = Node(4) + root.right.left = Node(3) + root.right.right = Node(4) + return root + + +def is_symmetric_tree(tree: Node) -> bool: + """ + Test cases for is_symmetric_tree function + >>> is_symmetric_tree(make_symmetric_tree()) + True + >>> is_symmetric_tree(make_asymmetric_tree()) + False + """ + if tree: + return is_mirror(tree.left, tree.right) + return True # An empty tree is considered symmetric. + + +def is_mirror(left: Node | None, right: Node | None) -> bool: + """ + >>> tree1 = make_symmetric_tree() + >>> tree1.right.right = Node(3) + >>> is_mirror(tree1.left, tree1.right) + True + >>> tree2 = make_asymmetric_tree() + >>> is_mirror(tree2.left, tree2.right) + False + """ + if left is None and right is None: + # Both sides are empty, which is symmetric. + return True + if left is None or right is None: + # One side is empty while the other is not, which is not symmetric. 
+ return False + if left.data == right.data: + # The values match, so check the subtree + return is_mirror(left.left, right.right) and is_mirror(left.right, right.left) + return False + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From d0c54acd75cedf14cff353869482a0487fea1697 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 6 Oct 2023 04:31:11 +0200 Subject: [PATCH 042/306] Use dataclasses in singly_linked_list.py (#9886) --- DIRECTORY.md | 7 + .../linked_list/singly_linked_list.py | 151 ++++++++++-------- 2 files changed, 93 insertions(+), 65 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index c199a4329..a975b9264 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -25,6 +25,7 @@ * [Combination Sum](backtracking/combination_sum.py) * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py) * [Knight Tour](backtracking/knight_tour.py) + * [Match Word Pattern](backtracking/match_word_pattern.py) * [Minimax](backtracking/minimax.py) * [N Queens](backtracking/n_queens.py) * [N Queens Math](backtracking/n_queens_math.py) @@ -199,6 +200,7 @@ * [Red Black Tree](data_structures/binary_tree/red_black_tree.py) * [Segment Tree](data_structures/binary_tree/segment_tree.py) * [Segment Tree Other](data_structures/binary_tree/segment_tree_other.py) + * [Symmetric Tree](data_structures/binary_tree/symmetric_tree.py) * [Treap](data_structures/binary_tree/treap.py) * [Wavelet Tree](data_structures/binary_tree/wavelet_tree.py) * Disjoint Set @@ -277,6 +279,7 @@ * [Convolve](digital_image_processing/filters/convolve.py) * [Gabor Filter](digital_image_processing/filters/gabor_filter.py) * [Gaussian Filter](digital_image_processing/filters/gaussian_filter.py) + * [Laplacian Filter](digital_image_processing/filters/laplacian_filter.py) * [Local Binary Pattern](digital_image_processing/filters/local_binary_pattern.py) * [Median Filter](digital_image_processing/filters/median_filter.py) * [Sobel 
Filter](digital_image_processing/filters/sobel_filter.py) @@ -365,8 +368,10 @@ * [Ind Reactance](electronics/ind_reactance.py) * [Ohms Law](electronics/ohms_law.py) * [Real And Reactive Power](electronics/real_and_reactive_power.py) + * [Resistor Color Code](electronics/resistor_color_code.py) * [Resistor Equivalence](electronics/resistor_equivalence.py) * [Resonant Frequency](electronics/resonant_frequency.py) + * [Wheatstone Bridge](electronics/wheatstone_bridge.py) ## File Transfer * [Receive File](file_transfer/receive_file.py) @@ -415,6 +420,7 @@ * [Check Bipartite Graph Dfs](graphs/check_bipartite_graph_dfs.py) * [Check Cycle](graphs/check_cycle.py) * [Connected Components](graphs/connected_components.py) + * [Deep Clone Graph](graphs/deep_clone_graph.py) * [Depth First Search](graphs/depth_first_search.py) * [Depth First Search 2](graphs/depth_first_search_2.py) * [Dijkstra](graphs/dijkstra.py) @@ -1159,6 +1165,7 @@ * [Autocomplete Using Trie](strings/autocomplete_using_trie.py) * [Barcode Validator](strings/barcode_validator.py) * [Boyer Moore Search](strings/boyer_moore_search.py) + * [Camel Case To Snake Case](strings/camel_case_to_snake_case.py) * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index f4b2ddce1..2c6713a47 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -1,27 +1,38 @@ +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass from typing import Any +@dataclass class Node: - def __init__(self, data: Any): - """ - Create and initialize Node class instance. - >>> Node(20) - Node(20) - >>> Node("Hello, world!") - Node(Hello, world!) 
- >>> Node(None) - Node(None) - >>> Node(True) - Node(True) - """ - self.data = data - self.next = None + """ + Create and initialize Node class instance. + >>> Node(20) + Node(20) + >>> Node("Hello, world!") + Node(Hello, world!) + >>> Node(None) + Node(None) + >>> Node(True) + Node(True) + """ + + data: Any + next_node: Node | None = None def __repr__(self) -> str: """ Get the string representation of this node. >>> Node(10).__repr__() 'Node(10)' + >>> repr(Node(10)) + 'Node(10)' + >>> str(Node(10)) + 'Node(10)' + >>> Node(10) + Node(10) """ return f"Node({self.data})" @@ -31,10 +42,12 @@ class LinkedList: """ Create and initialize LinkedList class instance. >>> linked_list = LinkedList() + >>> linked_list.head is None + True """ self.head = None - def __iter__(self) -> Any: + def __iter__(self) -> Iterator[Any]: """ This function is intended for iterators to access and iterate through data inside linked list. @@ -51,7 +64,7 @@ class LinkedList: node = self.head while node: yield node.data - node = node.next + node = node.next_node def __len__(self) -> int: """ @@ -81,9 +94,16 @@ class LinkedList: >>> linked_list.insert_tail(1) >>> linked_list.insert_tail(3) >>> linked_list.__repr__() - '1->3' + '1 -> 3' + >>> repr(linked_list) + '1 -> 3' + >>> str(linked_list) + '1 -> 3' + >>> linked_list.insert_tail(5) + >>> f"{linked_list}" + '1 -> 3 -> 5' """ - return "->".join([str(item) for item in self]) + return " -> ".join([str(item) for item in self]) def __getitem__(self, index: int) -> Any: """ @@ -134,7 +154,7 @@ class LinkedList: raise ValueError("list index out of range.") current = self.head for _ in range(index): - current = current.next + current = current.next_node current.data = data def insert_tail(self, data: Any) -> None: @@ -146,10 +166,10 @@ class LinkedList: tail >>> linked_list.insert_tail("tail_2") >>> linked_list - tail->tail_2 + tail -> tail_2 >>> linked_list.insert_tail("tail_3") >>> linked_list - tail->tail_2->tail_3 + tail -> tail_2 -> tail_3 """ 
self.insert_nth(len(self), data) @@ -162,10 +182,10 @@ class LinkedList: head >>> linked_list.insert_head("head_2") >>> linked_list - head_2->head + head_2 -> head >>> linked_list.insert_head("head_3") >>> linked_list - head_3->head_2->head + head_3 -> head_2 -> head """ self.insert_nth(0, data) @@ -177,13 +197,13 @@ class LinkedList: >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first -> second -> third >>> linked_list.insert_nth(1, "fourth") >>> linked_list - first->fourth->second->third + first -> fourth -> second -> third >>> linked_list.insert_nth(3, "fifth") >>> linked_list - first->fourth->second->fifth->third + first -> fourth -> second -> fifth -> third """ if not 0 <= index <= len(self): raise IndexError("list index out of range") @@ -191,14 +211,14 @@ class LinkedList: if self.head is None: self.head = new_node elif index == 0: - new_node.next = self.head # link new_node to head + new_node.next_node = self.head # link new_node to head self.head = new_node else: temp = self.head for _ in range(index - 1): - temp = temp.next - new_node.next = temp.next - temp.next = new_node + temp = temp.next_node + new_node.next_node = temp.next_node + temp.next_node = new_node def print_list(self) -> None: # print every node data """ @@ -208,7 +228,7 @@ class LinkedList: >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first -> second -> third """ print(self) @@ -221,11 +241,11 @@ class LinkedList: >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first -> second -> third >>> linked_list.delete_head() 'first' >>> linked_list - second->third + second -> third >>> linked_list.delete_head() 'second' >>> linked_list @@ -248,11 +268,11 @@ class LinkedList: >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first 
-> second -> third >>> linked_list.delete_tail() 'third' >>> linked_list - first->second + first -> second >>> linked_list.delete_tail() 'second' >>> linked_list @@ -275,11 +295,11 @@ class LinkedList: >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first -> second -> third >>> linked_list.delete_nth(1) # delete middle 'second' >>> linked_list - first->third + first -> third >>> linked_list.delete_nth(5) # this raises error Traceback (most recent call last): ... @@ -293,13 +313,13 @@ class LinkedList: raise IndexError("List index out of range.") delete_node = self.head # default first node if index == 0: - self.head = self.head.next + self.head = self.head.next_node else: temp = self.head for _ in range(index - 1): - temp = temp.next - delete_node = temp.next - temp.next = temp.next.next + temp = temp.next_node + delete_node = temp.next_node + temp.next_node = temp.next_node.next_node return delete_node.data def is_empty(self) -> bool: @@ -322,22 +342,22 @@ class LinkedList: >>> linked_list.insert_tail("second") >>> linked_list.insert_tail("third") >>> linked_list - first->second->third + first -> second -> third >>> linked_list.reverse() >>> linked_list - third->second->first + third -> second -> first """ prev = None current = self.head while current: # Store the current node's next node. 
- next_node = current.next - # Make the current node's next point backwards - current.next = prev + next_node = current.next_node + # Make the current node's next_node point backwards + current.next_node = prev # Make the previous node be the current node prev = current - # Make the current node the next node (to progress iteration) + # Make the current node the next_node node (to progress iteration) current = next_node # Return prev in order to put the head at the end self.head = prev @@ -366,17 +386,17 @@ def test_singly_linked_list() -> None: for i in range(10): assert len(linked_list) == i linked_list.insert_nth(i, i + 1) - assert str(linked_list) == "->".join(str(i) for i in range(1, 11)) + assert str(linked_list) == " -> ".join(str(i) for i in range(1, 11)) linked_list.insert_head(0) linked_list.insert_tail(11) - assert str(linked_list) == "->".join(str(i) for i in range(12)) + assert str(linked_list) == " -> ".join(str(i) for i in range(12)) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9) == 10 assert linked_list.delete_tail() == 11 assert len(linked_list) == 9 - assert str(linked_list) == "->".join(str(i) for i in range(1, 10)) + assert str(linked_list) == " -> ".join(str(i) for i in range(1, 10)) assert all(linked_list[i] == i + 1 for i in range(9)) is True @@ -385,7 +405,7 @@ def test_singly_linked_list() -> None: assert all(linked_list[i] == -i for i in range(9)) is True linked_list.reverse() - assert str(linked_list) == "->".join(str(i) for i in range(-8, 1)) + assert str(linked_list) == " -> ".join(str(i) for i in range(-8, 1)) def test_singly_linked_list_2() -> None: @@ -417,56 +437,57 @@ def test_singly_linked_list_2() -> None: # Check if it's empty or not assert linked_list.is_empty() is False assert ( - str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" - "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" + str(linked_list) + == "-9 -> 100 -> Node(77345112) -> dlrow olleH -> 7 -> 5555 -> " + 
"0 -> -192.55555 -> Hello, world! -> 77.9 -> Node(10) -> None -> None -> 12.2" ) # Delete the head result = linked_list.delete_head() assert result == -9 assert ( - str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" - "Hello, world!->77.9->Node(10)->None->None->12.2" + str(linked_list) == "100 -> Node(77345112) -> dlrow olleH -> 7 -> 5555 -> 0 -> " + "-192.55555 -> Hello, world! -> 77.9 -> Node(10) -> None -> None -> 12.2" ) # Delete the tail result = linked_list.delete_tail() assert result == 12.2 assert ( - str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" - "Hello, world!->77.9->Node(10)->None->None" + str(linked_list) == "100 -> Node(77345112) -> dlrow olleH -> 7 -> 5555 -> 0 -> " + "-192.55555 -> Hello, world! -> 77.9 -> Node(10) -> None -> None" ) # Delete a node in specific location in linked list result = linked_list.delete_nth(10) assert result is None assert ( - str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" - "Hello, world!->77.9->Node(10)->None" + str(linked_list) == "100 -> Node(77345112) -> dlrow olleH -> 7 -> 5555 -> 0 -> " + "-192.55555 -> Hello, world! -> 77.9 -> Node(10) -> None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!")) assert ( str(linked_list) - == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" - "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" + == "Node(Hello again, world!) -> 100 -> Node(77345112) -> dlrow olleH -> " + "7 -> 5555 -> 0 -> -192.55555 -> Hello, world! -> 77.9 -> Node(10) -> None" ) # Add None to its tail linked_list.insert_tail(None) assert ( str(linked_list) - == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" - "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" + == "Node(Hello again, world!) -> 100 -> Node(77345112) -> dlrow olleH -> 7 -> " + "5555 -> 0 -> -192.55555 -> Hello, world! 
-> 77.9 -> Node(10) -> None -> None" ) # Reverse the linked list linked_list.reverse() assert ( str(linked_list) - == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" - "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" + == "None -> None -> Node(10) -> 77.9 -> Hello, world! -> -192.55555 -> 0 -> " + "5555 -> 7 -> dlrow olleH -> Node(77345112) -> 100 -> Node(Hello again, world!)" ) From 795e97e87f6760a693769097613ace56a6addc8d Mon Sep 17 00:00:00 2001 From: Sarvjeet Singh <63469455+aazad20@users.noreply.github.com> Date: Fri, 6 Oct 2023 19:19:34 +0530 Subject: [PATCH 043/306] Added Majority Voting Algorithm (#9866) * Create MajorityVoteAlgorithm.py * Update and rename MajorityVoteAlgorithm.py to majorityvotealgorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename majorityvotealgorithm.py to majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py * Update majority_vote_algorithm.py * Update other/majority_vote_algorithm.py Co-authored-by: Christian Clauss * renaming variables majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py * Update majority_vote_algorithm.py * Update majority_vote_algorithm.py * Update majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update other/majority_vote_algorithm.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for 
more information, see https://pre-commit.ci * Update other/majority_vote_algorithm.py Co-authored-by: Christian Clauss * adding more testcases majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update majority_vote_algorithm.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- other/majority_vote_algorithm.py | 37 ++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 other/majority_vote_algorithm.py diff --git a/other/majority_vote_algorithm.py b/other/majority_vote_algorithm.py new file mode 100644 index 000000000..ab8b386dd --- /dev/null +++ b/other/majority_vote_algorithm.py @@ -0,0 +1,37 @@ +""" +This is Booyer-Moore Majority Vote Algorithm. The problem statement goes like this: +Given an integer array of size n, find all elements that appear more than ⌊ n/k ⌋ times. +We have to solve in O(n) time and O(1) Space. 
+URL : https://en.wikipedia.org/wiki/Boyer%E2%80%93Moore_majority_vote_algorithm +""" +from collections import Counter + + +def majority_vote(votes: list[int], votes_needed_to_win: int) -> list[int]: + """ + >>> majority_vote([1, 2, 2, 3, 1, 3, 2], 3) + [2] + >>> majority_vote([1, 2, 2, 3, 1, 3, 2], 2) + [] + >>> majority_vote([1, 2, 2, 3, 1, 3, 2], 4) + [1, 2, 3] + """ + majority_candidate_counter: Counter[int] = Counter() + for vote in votes: + majority_candidate_counter[vote] += 1 + if len(majority_candidate_counter) == votes_needed_to_win: + majority_candidate_counter -= Counter(set(majority_candidate_counter)) + majority_candidate_counter = Counter( + vote for vote in votes if vote in majority_candidate_counter + ) + return [ + vote + for vote in majority_candidate_counter + if majority_candidate_counter[vote] > len(votes) / votes_needed_to_win + ] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 995c5533c645250c120b11f0eddc53909fc3d012 Mon Sep 17 00:00:00 2001 From: fxdup <47389903+fxdup@users.noreply.github.com> Date: Fri, 6 Oct 2023 14:46:58 -0400 Subject: [PATCH 044/306] Consolidate gamma (#9769) * refactor(gamma): Append _iterative to func name * refactor(gamma): Consolidate implementations * refactor(gamma): Redundant test function removal * Update maths/gamma.py --------- Co-authored-by: Tianyi Zheng --- maths/gamma.py | 91 ++++++++++++++++++++++++++++++++++------ maths/gamma_recursive.py | 77 ---------------------------------- 2 files changed, 79 insertions(+), 89 deletions(-) delete mode 100644 maths/gamma_recursive.py diff --git a/maths/gamma.py b/maths/gamma.py index d5debc587..822bbc744 100644 --- a/maths/gamma.py +++ b/maths/gamma.py @@ -1,35 +1,43 @@ +""" +Gamma function is a very useful tool in math and physics. +It helps calculating complex integral in a convenient way. 
+for more info: https://en.wikipedia.org/wiki/Gamma_function +In mathematics, the gamma function is one commonly +used extension of the factorial function to complex numbers. +The gamma function is defined for all complex numbers except +the non-positive integers +Python's Standard Library math.gamma() function overflows around gamma(171.624). +""" import math from numpy import inf from scipy.integrate import quad -def gamma(num: float) -> float: +def gamma_iterative(num: float) -> float: """ - https://en.wikipedia.org/wiki/Gamma_function - In mathematics, the gamma function is one commonly - used extension of the factorial function to complex numbers. - The gamma function is defined for all complex numbers except the non-positive - integers - >>> gamma(-1) + Calculates the value of Gamma function of num + where num is either an integer (1, 2, 3..) or a half-integer (0.5, 1.5, 2.5 ...). + + >>> gamma_iterative(-1) Traceback (most recent call last): ... ValueError: math domain error - >>> gamma(0) + >>> gamma_iterative(0) Traceback (most recent call last): ... ValueError: math domain error - >>> gamma(9) + >>> gamma_iterative(9) 40320.0 >>> from math import gamma as math_gamma - >>> all(.99999999 < gamma(i) / math_gamma(i) <= 1.000000001 + >>> all(.99999999 < gamma_iterative(i) / math_gamma(i) <= 1.000000001 ... for i in range(1, 50)) True - >>> gamma(-1)/math_gamma(-1) <= 1.000000001 + >>> gamma_iterative(-1)/math_gamma(-1) <= 1.000000001 Traceback (most recent call last): ... ValueError: math domain error - >>> gamma(3.3) - math_gamma(3.3) <= 0.00000001 + >>> gamma_iterative(3.3) - math_gamma(3.3) <= 0.00000001 True """ if num <= 0: @@ -42,7 +50,66 @@ def integrand(x: float, z: float) -> float: return math.pow(x, z - 1) * math.exp(-x) +def gamma_recursive(num: float) -> float: + """ + Calculates the value of Gamma function of num + where num is either an integer (1, 2, 3..) or a half-integer (0.5, 1.5, 2.5 ...). 
+ Implemented using recursion + Examples: + >>> from math import isclose, gamma as math_gamma + >>> gamma_recursive(0.5) + 1.7724538509055159 + >>> gamma_recursive(1) + 1.0 + >>> gamma_recursive(2) + 1.0 + >>> gamma_recursive(3.5) + 3.3233509704478426 + >>> gamma_recursive(171.5) + 9.483367566824795e+307 + >>> all(isclose(gamma_recursive(num), math_gamma(num)) + ... for num in (0.5, 2, 3.5, 171.5)) + True + >>> gamma_recursive(0) + Traceback (most recent call last): + ... + ValueError: math domain error + >>> gamma_recursive(-1.1) + Traceback (most recent call last): + ... + ValueError: math domain error + >>> gamma_recursive(-4) + Traceback (most recent call last): + ... + ValueError: math domain error + >>> gamma_recursive(172) + Traceback (most recent call last): + ... + OverflowError: math range error + >>> gamma_recursive(1.1) + Traceback (most recent call last): + ... + NotImplementedError: num must be an integer or a half-integer + """ + if num <= 0: + raise ValueError("math domain error") + if num > 171.5: + raise OverflowError("math range error") + elif num - int(num) not in (0, 0.5): + raise NotImplementedError("num must be an integer or a half-integer") + elif num == 0.5: + return math.sqrt(math.pi) + else: + return 1.0 if num == 1 else (num - 1) * gamma_recursive(num - 1) + + if __name__ == "__main__": from doctest import testmod testmod() + num = 1.0 + while num: + num = float(input("Gamma of: ")) + print(f"gamma_iterative({num}) = {gamma_iterative(num)}") + print(f"gamma_recursive({num}) = {gamma_recursive(num)}") + print("\nEnter 0 to exit...") diff --git a/maths/gamma_recursive.py b/maths/gamma_recursive.py deleted file mode 100644 index 3d6b8c5e8..000000000 --- a/maths/gamma_recursive.py +++ /dev/null @@ -1,77 +0,0 @@ -""" -Gamma function is a very useful tool in math and physics. -It helps calculating complex integral in a convenient way. 
-for more info: https://en.wikipedia.org/wiki/Gamma_function -Python's Standard Library math.gamma() function overflows around gamma(171.624). -""" -from math import pi, sqrt - - -def gamma(num: float) -> float: - """ - Calculates the value of Gamma function of num - where num is either an integer (1, 2, 3..) or a half-integer (0.5, 1.5, 2.5 ...). - Implemented using recursion - Examples: - >>> from math import isclose, gamma as math_gamma - >>> gamma(0.5) - 1.7724538509055159 - >>> gamma(2) - 1.0 - >>> gamma(3.5) - 3.3233509704478426 - >>> gamma(171.5) - 9.483367566824795e+307 - >>> all(isclose(gamma(num), math_gamma(num)) for num in (0.5, 2, 3.5, 171.5)) - True - >>> gamma(0) - Traceback (most recent call last): - ... - ValueError: math domain error - >>> gamma(-1.1) - Traceback (most recent call last): - ... - ValueError: math domain error - >>> gamma(-4) - Traceback (most recent call last): - ... - ValueError: math domain error - >>> gamma(172) - Traceback (most recent call last): - ... - OverflowError: math range error - >>> gamma(1.1) - Traceback (most recent call last): - ... 
- NotImplementedError: num must be an integer or a half-integer - """ - if num <= 0: - raise ValueError("math domain error") - if num > 171.5: - raise OverflowError("math range error") - elif num - int(num) not in (0, 0.5): - raise NotImplementedError("num must be an integer or a half-integer") - elif num == 0.5: - return sqrt(pi) - else: - return 1.0 if num == 1 else (num - 1) * gamma(num - 1) - - -def test_gamma() -> None: - """ - >>> test_gamma() - """ - assert gamma(0.5) == sqrt(pi) - assert gamma(1) == 1.0 - assert gamma(2) == 1.0 - - -if __name__ == "__main__": - from doctest import testmod - - testmod() - num = 1.0 - while num: - num = float(input("Gamma of: ")) - print(f"gamma({num}) = {gamma(num)}") - print("\nEnter 0 to exit...") From c6ec99d57140cbf8b54077d379dfffeb6c7ad280 Mon Sep 17 00:00:00 2001 From: Kausthub Kannan Date: Sat, 7 Oct 2023 00:53:05 +0530 Subject: [PATCH 045/306] Added Mish Activation Function (#9942) * Added Mish Activation Function * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- neural_network/activation_functions/mish.py | 39 +++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 neural_network/activation_functions/mish.py diff --git a/neural_network/activation_functions/mish.py b/neural_network/activation_functions/mish.py new file mode 100644 index 000000000..e4f98307f --- /dev/null +++ b/neural_network/activation_functions/mish.py @@ -0,0 +1,39 @@ +""" +Mish Activation Function + +Use Case: Improved version of the ReLU activation function used in Computer Vision. +For more detailed information, you can refer to the following link: +https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Mish +""" + +import numpy as np + + +def mish(vector: np.ndarray) -> np.ndarray: + """ + Implements the Mish activation function. + + Parameters: + vector (np.ndarray): The input array for Mish activation. + + Returns: + np.ndarray: The input array after applying the Mish activation. 
+ + Formula: + f(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x)) + + Examples: + >>> mish(vector=np.array([2.3,0.6,-2,-3.8])) + array([ 2.26211893, 0.46613649, -0.25250148, -0.08405831]) + + >>> mish(np.array([-9.2, -0.3, 0.45, -4.56])) + array([-0.00092952, -0.15113318, 0.33152014, -0.04745745]) + + """ + return vector * np.tanh(np.log(1 + np.exp(vector))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 80a2087e0aa349b81fb6bbc5d73dae920f560e75 Mon Sep 17 00:00:00 2001 From: Kausthub Kannan Date: Sat, 7 Oct 2023 01:56:09 +0530 Subject: [PATCH 046/306] Added Softplus activation function (#9944) --- .../activation_functions/softplus.py | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 neural_network/activation_functions/softplus.py diff --git a/neural_network/activation_functions/softplus.py b/neural_network/activation_functions/softplus.py new file mode 100644 index 000000000..35fdf41af --- /dev/null +++ b/neural_network/activation_functions/softplus.py @@ -0,0 +1,37 @@ +""" +Softplus Activation Function + +Use Case: The Softplus function is a smooth approximation of the ReLU function. +For more detailed information, you can refer to the following link: +https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Softplus +""" + +import numpy as np + + +def softplus(vector: np.ndarray) -> np.ndarray: + """ + Implements the Softplus activation function. + + Parameters: + vector (np.ndarray): The input array for the Softplus activation. + + Returns: + np.ndarray: The input array after applying the Softplus activation. 
+ + Formula: f(x) = ln(1 + e^x) + + Examples: + >>> softplus(np.array([2.3, 0.6, -2, -3.8])) + array([2.39554546, 1.03748795, 0.12692801, 0.02212422]) + + >>> softplus(np.array([-9.2, -0.3, 0.45, -4.56])) + array([1.01034298e-04, 5.54355244e-01, 9.43248946e-01, 1.04077103e-02]) + """ + return np.log(1 + np.exp(vector)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2122474e41f2b85500e1f9347d98c9efc15aba4e Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Sat, 7 Oct 2023 14:09:39 +0500 Subject: [PATCH 047/306] Segmented sieve - doctests (#9945) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Added doctests. * Update segmented_sieve.py Removed unnecessary check. * Update segmented_sieve.py Added checks for 0 and negative numbers. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update segmented_sieve.py * Update segmented_sieve.py Added float number check. 
* Update segmented_sieve.py * Update segmented_sieve.py simplified verification * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update segmented_sieve.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update segmented_sieve.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ValueError: Number 22.2 must instead be a positive integer --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/segmented_sieve.py | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/maths/segmented_sieve.py b/maths/segmented_sieve.py index e950a83b7..125390edc 100644 --- a/maths/segmented_sieve.py +++ b/maths/segmented_sieve.py @@ -4,7 +4,36 @@ import math def sieve(n: int) -> list[int]: - """Segmented Sieve.""" + """ + Segmented Sieve. + + Examples: + >>> sieve(8) + [2, 3, 5, 7] + + >>> sieve(27) + [2, 3, 5, 7, 11, 13, 17, 19, 23] + + >>> sieve(0) + Traceback (most recent call last): + ... + ValueError: Number 0 must instead be a positive integer + + >>> sieve(-1) + Traceback (most recent call last): + ... + ValueError: Number -1 must instead be a positive integer + + >>> sieve(22.2) + Traceback (most recent call last): + ... 
+ ValueError: Number 22.2 must instead be a positive integer + """ + + if n <= 0 or isinstance(n, float): + msg = f"Number {n} must instead be a positive integer" + raise ValueError(msg) + in_prime = [] start = 2 end = int(math.sqrt(n)) # Size of every segment @@ -42,4 +71,9 @@ def sieve(n: int) -> list[int]: return prime -print(sieve(10**6)) +if __name__ == "__main__": + import doctest + + doctest.testmod() + + print(f"{sieve(10**6) = }") From 678e0aa8cfdaae1d17536fdcf489bebe1e12cfc6 Mon Sep 17 00:00:00 2001 From: Saahil Mahato <115351000+saahil-mahato@users.noreply.github.com> Date: Sat, 7 Oct 2023 15:20:23 +0545 Subject: [PATCH 048/306] Mention square matrices in strassen docs and make it more clear (#9839) * refactor: fix strassen matrix multiplication docs * refactor: make docs more clear --- divide_and_conquer/strassen_matrix_multiplication.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py index 1d03950ef..f529a255d 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -74,7 +74,7 @@ def print_matrix(matrix: list) -> None: def actual_strassen(matrix_a: list, matrix_b: list) -> list: """ Recursive function to calculate the product of two matrices, using the Strassen - Algorithm. It only supports even length matrices. + Algorithm. It only supports square matrices of any size that is a power of 2. 
""" if matrix_dimensions(matrix_a) == (2, 2): return default_matrix_multiplication(matrix_a, matrix_b) @@ -129,8 +129,8 @@ def strassen(matrix1: list, matrix2: list) -> list: new_matrix1 = matrix1 new_matrix2 = matrix2 - # Adding zeros to the matrices so that the arrays dimensions are the same and also - # power of 2 + # Adding zeros to the matrices to convert them both into square matrices of equal + # dimensions that are a power of 2 for i in range(maxim): if i < dimension1[0]: for _ in range(dimension1[1], maxim): From 78af0c43c623332029c9ad1d240d81577aac5d72 Mon Sep 17 00:00:00 2001 From: Pronay Debnath Date: Sat, 7 Oct 2023 21:21:30 +0530 Subject: [PATCH 049/306] Create fractional_cover_problem.py (#9973) * Create fractional_cover_problem.py * Update fractional_cover_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fractional_cover_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fractional_cover_problem.py * Update fractional_cover_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fractional_cover_problem.py * Update fractional_cover_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fractional_cover_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Lose __eq__() * Update fractional_cover_problem.py * Define Item property ratio --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- greedy_methods/fractional_cover_problem.py | 102 +++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 greedy_methods/fractional_cover_problem.py diff --git a/greedy_methods/fractional_cover_problem.py 
b/greedy_methods/fractional_cover_problem.py new file mode 100644 index 000000000..e37c363f1 --- /dev/null +++ b/greedy_methods/fractional_cover_problem.py @@ -0,0 +1,102 @@ +# https://en.wikipedia.org/wiki/Set_cover_problem + +from dataclasses import dataclass +from operator import attrgetter + + +@dataclass +class Item: + weight: int + value: int + + @property + def ratio(self) -> float: + """ + Return the value-to-weight ratio for the item. + + Returns: + float: The value-to-weight ratio for the item. + + Examples: + >>> Item(10, 65).ratio + 6.5 + + >>> Item(20, 100).ratio + 5.0 + + >>> Item(30, 120).ratio + 4.0 + """ + return self.value / self.weight + + +def fractional_cover(items: list[Item], capacity: int) -> float: + """ + Solve the Fractional Cover Problem. + + Args: + items: A list of items, where each item has weight and value attributes. + capacity: The maximum weight capacity of the knapsack. + + Returns: + The maximum value that can be obtained by selecting fractions of items to cover + the knapsack's capacity. + + Raises: + ValueError: If capacity is negative. + + Examples: + >>> fractional_cover((Item(10, 60), Item(20, 100), Item(30, 120)), capacity=50) + 240.0 + + >>> fractional_cover([Item(20, 100), Item(30, 120), Item(10, 60)], capacity=25) + 135.0 + + >>> fractional_cover([Item(10, 60), Item(20, 100), Item(30, 120)], capacity=60) + 280.0 + + >>> fractional_cover(items=[Item(5, 30), Item(10, 60), Item(15, 90)], capacity=30) + 180.0 + + >>> fractional_cover(items=[], capacity=50) + 0.0 + + >>> fractional_cover(items=[Item(10, 60)], capacity=5) + 30.0 + + >>> fractional_cover(items=[Item(10, 60)], capacity=1) + 6.0 + + >>> fractional_cover(items=[Item(10, 60)], capacity=0) + 0.0 + + >>> fractional_cover(items=[Item(10, 60)], capacity=-1) + Traceback (most recent call last): + ... 
+ ValueError: Capacity cannot be negative + """ + if capacity < 0: + raise ValueError("Capacity cannot be negative") + + total_value = 0.0 + remaining_capacity = capacity + + # Sort the items by their value-to-weight ratio in descending order + for item in sorted(items, key=attrgetter("ratio"), reverse=True): + if remaining_capacity == 0: + break + + weight_taken = min(item.weight, remaining_capacity) + total_value += weight_taken * item.ratio + remaining_capacity -= weight_taken + + return total_value + + +if __name__ == "__main__": + import doctest + + if result := doctest.testmod().failed: + print(f"{result} test(s) failed") + else: + print("All tests passed") From 112daddc4de91d60bbdd3201fc9a6a4afc60f57a Mon Sep 17 00:00:00 2001 From: dhruvtrigotra <72982592+dhruvtrigotra@users.noreply.github.com> Date: Sun, 8 Oct 2023 00:34:24 +0530 Subject: [PATCH 050/306] charging_capacitor (#10016) * charging_capacitor * charging_capacitor * Final edits --------- Co-authored-by: Christian Clauss --- electronics/charging_capacitor.py | 71 +++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 electronics/charging_capacitor.py diff --git a/electronics/charging_capacitor.py b/electronics/charging_capacitor.py new file mode 100644 index 000000000..4029b0ecf --- /dev/null +++ b/electronics/charging_capacitor.py @@ -0,0 +1,71 @@ +# source - The ARRL Handbook for Radio Communications +# https://en.wikipedia.org/wiki/RC_time_constant + +""" +Description +----------- +When a capacitor is connected with a potential source (AC or DC). It starts to charge +at a general speed but when a resistor is connected in the circuit with in series to +a capacitor then the capacitor charges slowly means it will take more time than usual. +while the capacitor is being charged, the voltage is in exponential function with time. + +'resistance(ohms) * capacitance(farads)' is called RC-timeconstant which may also be +represented as τ (tau). 
By using this RC-timeconstant we can find the voltage at any +time 't' from the initiation of charging a capacitor with the help of the exponential +function containing RC. Both at charging and discharging of a capacitor. +""" +from math import exp # value of exp = 2.718281828459… + + +def charging_capacitor( + source_voltage: float, # voltage in volts. + resistance: float, # resistance in ohms. + capacitance: float, # capacitance in farads. + time_sec: float, # time in seconds after charging initiation of capacitor. +) -> float: + """ + Find capacitor voltage at any nth second after initiating its charging. + + Examples + -------- + >>> charging_capacitor(source_voltage=.2,resistance=.9,capacitance=8.4,time_sec=.5) + 0.013 + + >>> charging_capacitor(source_voltage=2.2,resistance=3.5,capacitance=2.4,time_sec=9) + 1.446 + + >>> charging_capacitor(source_voltage=15,resistance=200,capacitance=20,time_sec=2) + 0.007 + + >>> charging_capacitor(20, 2000, 30*pow(10,-5), 4) + 19.975 + + >>> charging_capacitor(source_voltage=0,resistance=10.0,capacitance=.30,time_sec=3) + Traceback (most recent call last): + ... + ValueError: Source voltage must be positive. + + >>> charging_capacitor(source_voltage=20,resistance=-2000,capacitance=30,time_sec=4) + Traceback (most recent call last): + ... + ValueError: Resistance must be positive. + + >>> charging_capacitor(source_voltage=30,resistance=1500,capacitance=0,time_sec=4) + Traceback (most recent call last): + ... + ValueError: Capacitance must be positive. 
+ """ + + if source_voltage <= 0: + raise ValueError("Source voltage must be positive.") + if resistance <= 0: + raise ValueError("Resistance must be positive.") + if capacitance <= 0: + raise ValueError("Capacitance must be positive.") + return round(source_voltage * (1 - exp(-time_sec / (resistance * capacitance))), 3) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 60291738d2552999545c414bb8a8e90f86c69678 Mon Sep 17 00:00:00 2001 From: Kosuri L Indu <118645569+kosuri-indu@users.noreply.github.com> Date: Sun, 8 Oct 2023 00:38:38 +0530 Subject: [PATCH 051/306] add : trapped water program under dynamic programming (#10027) * to add the trapped water program * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to make changes for error : B006 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to make changes for error : B006 * to make changes for error : B006 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to make changes in doctest * to make changes in doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/trapped_water.py Co-authored-by: Christian Clauss * Update dynamic_programming/trapped_water.py Co-authored-by: Christian Clauss * to make changes in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to make changes in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/trapped_water.py Co-authored-by: Christian Clauss * to make changes in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * for negative heights * Update dynamic_programming/trapped_water.py Co-authored-by: 
Christian Clauss * to remove falsy * Final edits * tuple[int, ...] --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/trapped_water.py | 60 ++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 dynamic_programming/trapped_water.py diff --git a/dynamic_programming/trapped_water.py b/dynamic_programming/trapped_water.py new file mode 100644 index 000000000..8bec9fac5 --- /dev/null +++ b/dynamic_programming/trapped_water.py @@ -0,0 +1,60 @@ +""" +Given an array of non-negative integers representing an elevation map where the width +of each bar is 1, this program calculates how much rainwater can be trapped. + +Example - height = (0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1) +Output: 6 +This problem can be solved using the concept of "DYNAMIC PROGRAMMING". + +We calculate the maximum height of bars on the left and right of every bar in array. +Then iterate over the width of structure and at each index. +The amount of water that will be stored is equal to minimum of maximum height of bars +on both sides minus height of bar at current position. +""" + + +def trapped_rainwater(heights: tuple[int, ...]) -> int: + """ + The trapped_rainwater function calculates the total amount of rainwater that can be + trapped given an array of bar heights. + It uses a dynamic programming approach, determining the maximum height of bars on + both sides for each bar, and then computing the trapped water above each bar. + The function returns the total trapped water. + + >>> trapped_rainwater((0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1)) + 6 + >>> trapped_rainwater((7, 1, 5, 3, 6, 4)) + 9 + >>> trapped_rainwater((7, 1, 5, 3, 6, -1)) + Traceback (most recent call last): + ... 
+ ValueError: No height can be negative + """ + if not heights: + return 0 + if any(h < 0 for h in heights): + raise ValueError("No height can be negative") + length = len(heights) + + left_max = [0] * length + left_max[0] = heights[0] + for i, height in enumerate(heights[1:], start=1): + left_max[i] = max(height, left_max[i - 1]) + + right_max = [0] * length + right_max[-1] = heights[-1] + for i in range(length - 2, -1, -1): + right_max[i] = max(heights[i], right_max[i + 1]) + + return sum( + min(left, right) - height + for left, right, height in zip(left_max, right_max, heights) + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(f"{trapped_rainwater((0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1)) = }") + print(f"{trapped_rainwater((7, 1, 5, 3, 6, 4)) = }") From 895dffb412d80f29c65a062bf6d91fd2a70d8818 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 7 Oct 2023 21:32:28 +0200 Subject: [PATCH 052/306] [pre-commit.ci] pre-commit autoupdate (#9543) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.291 → v0.0.292](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.291...v0.0.292) - [github.com/codespell-project/codespell: v2.2.5 → v2.2.6](https://github.com/codespell-project/codespell/compare/v2.2.5...v2.2.6) - [github.com/tox-dev/pyproject-fmt: 1.1.0 → 1.2.0](https://github.com/tox-dev/pyproject-fmt/compare/1.1.0...1.2.0) * updating DIRECTORY.md * Fix typos in test_min_spanning_tree_prim.py * Fix typos * codespell --ignore-words-list=manuel --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- 
.../cnn_classification.py.DISABLED.txt | 4 +-- computer_vision/mosaic_augmentation.py | 2 +- dynamic_programming/min_distance_up_bottom.py | 11 +++--- graphs/tests/test_min_spanning_tree_prim.py | 8 ++--- hashes/sha1.py | 36 ++++++++++--------- maths/pi_generator.py | 31 +++++++--------- maths/radians.py | 4 +-- maths/square_root.py | 7 ++-- neural_network/convolution_neural_network.py | 8 ++--- neural_network/gan.py_tf | 2 +- other/graham_scan.py | 8 ++--- other/linear_congruential_generator.py | 4 +-- other/password.py | 12 +++---- physics/speed_of_sound.py | 32 ++++++++--------- project_euler/problem_035/sol1.py | 12 +++---- project_euler/problem_135/sol1.py | 30 +++++++--------- project_euler/problem_493/sol1.py | 2 +- pyproject.toml | 2 +- 19 files changed, 98 insertions(+), 119 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dbf7ff341..8a88dcc07 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,7 +26,7 @@ repos: - id: black - repo: https://github.com/codespell-project/codespell - rev: v2.2.5 + rev: v2.2.6 hooks: - id: codespell additional_dependencies: diff --git a/computer_vision/cnn_classification.py.DISABLED.txt b/computer_vision/cnn_classification.py.DISABLED.txt index 9b5f8c95e..b813b7103 100644 --- a/computer_vision/cnn_classification.py.DISABLED.txt +++ b/computer_vision/cnn_classification.py.DISABLED.txt @@ -11,10 +11,10 @@ Download dataset from : https://lhncbc.nlm.nih.gov/LHC-publications/pubs/TuberculosisChestXrayImageDataSets.html 1. Download the dataset folder and create two folder training set and test set -in the parent dataste folder +in the parent dataset folder 2. Move 30-40 image from both TB positive and TB Negative folder in the test set folder -3. The labels of the iamges will be extracted from the folder name +3. The labels of the images will be extracted from the folder name the image is present in. 
""" diff --git a/computer_vision/mosaic_augmentation.py b/computer_vision/mosaic_augmentation.py index c150126d6..cd923dfe0 100644 --- a/computer_vision/mosaic_augmentation.py +++ b/computer_vision/mosaic_augmentation.py @@ -8,7 +8,7 @@ from string import ascii_lowercase, digits import cv2 import numpy as np -# Parrameters +# Parameters OUTPUT_SIZE = (720, 1280) # Height, Width SCALE_RANGE = (0.4, 0.6) # if height or width lower than this scale, drop it. FILTER_TINY_SCALE = 1 / 100 diff --git a/dynamic_programming/min_distance_up_bottom.py b/dynamic_programming/min_distance_up_bottom.py index 4870c7ef4..6b38a41a1 100644 --- a/dynamic_programming/min_distance_up_bottom.py +++ b/dynamic_programming/min_distance_up_bottom.py @@ -1,11 +1,8 @@ """ Author : Alexander Pantyukhin Date : October 14, 2022 -This is implementation Dynamic Programming up bottom approach -to find edit distance. -The aim is to demonstate up bottom approach for solving the task. -The implementation was tested on the -leetcode: https://leetcode.com/problems/edit-distance/ +This is an implementation of the up-bottom approach to find edit distance. +The implementation was tested on Leetcode: https://leetcode.com/problems/edit-distance/ Levinstein distance Dynamic Programming: up -> down. 
@@ -30,10 +27,10 @@ def min_distance_up_bottom(word1: str, word2: str) -> int: @functools.cache def min_distance(index1: int, index2: int) -> int: - # if first word index is overflow - delete all from the second word + # if first word index overflows - delete all from the second word if index1 >= len_word1: return len_word2 - index2 - # if second word index is overflow - delete all from the first word + # if second word index overflows - delete all from the first word if index2 >= len_word2: return len_word1 - index1 diff = int(word1[index1] != word2[index2]) # current letters not identical diff --git a/graphs/tests/test_min_spanning_tree_prim.py b/graphs/tests/test_min_spanning_tree_prim.py index 91feab28f..66e5706da 100644 --- a/graphs/tests/test_min_spanning_tree_prim.py +++ b/graphs/tests/test_min_spanning_tree_prim.py @@ -22,12 +22,12 @@ def test_prim_successful_result(): [1, 7, 11], ] - adjancency = defaultdict(list) + adjacency = defaultdict(list) for node1, node2, cost in edges: - adjancency[node1].append([node2, cost]) - adjancency[node2].append([node1, cost]) + adjacency[node1].append([node2, cost]) + adjacency[node2].append([node1, cost]) - result = mst(adjancency) + result = mst(adjacency) expected = [ [7, 6, 1], diff --git a/hashes/sha1.py b/hashes/sha1.py index 8a03673f3..a0fa688f8 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -1,26 +1,28 @@ """ -Demonstrates implementation of SHA1 Hash function in a Python class and gives utilities -to find hash of string or hash of text from a file. +Implementation of the SHA1 hash function and gives utilities to find hash of string or +hash of text from a file. Also contains a Test class to verify that the generated hash +matches what is returned by the hashlib library + Usage: python sha1.py --string "Hello World!!" python sha1.py --file "hello_world.txt" When run without any arguments, it prints the hash of the string "Hello World!! 
Welcome to Cryptography" -Also contains a Test class to verify that the generated Hash is same as that -returned by the hashlib library -SHA1 hash or SHA1 sum of a string is a cryptographic function which means it is easy +SHA1 hash or SHA1 sum of a string is a cryptographic function, which means it is easy to calculate forwards but extremely difficult to calculate backwards. What this means -is, you can easily calculate the hash of a string, but it is extremely difficult to -know the original string if you have its hash. This property is useful to communicate -securely, send encrypted messages and is very useful in payment systems, blockchain -and cryptocurrency etc. -The Algorithm as described in the reference: +is you can easily calculate the hash of a string, but it is extremely difficult to know +the original string if you have its hash. This property is useful for communicating +securely, send encrypted messages and is very useful in payment systems, blockchain and +cryptocurrency etc. + +The algorithm as described in the reference: First we start with a message. The message is padded and the length of the message is added to the end. It is then split into blocks of 512 bits or 64 bytes. The blocks are then processed one at a time. Each block must be expanded and compressed. -The value after each compression is added to a 160bit buffer called the current hash -state. After the last block is processed the current hash state is returned as +The value after each compression is added to a 160-bit buffer called the current hash +state. After the last block is processed, the current hash state is returned as the final hash. 
+ Reference: https://deadhacker.com/2006/02/21/sha-1-illustrated/ """ import argparse @@ -30,18 +32,18 @@ import struct class SHA1Hash: """ - Class to contain the entire pipeline for SHA1 Hashing Algorithm + Class to contain the entire pipeline for SHA1 hashing algorithm >>> SHA1Hash(bytes('Allan', 'utf-8')).final_hash() '872af2d8ac3d8695387e7c804bf0e02c18df9e6e' """ def __init__(self, data): """ - Inititates the variables data and h. h is a list of 5 8-digit Hexadecimal + Initiates the variables data and h. h is a list of 5 8-digit hexadecimal numbers corresponding to (1732584193, 4023233417, 2562383102, 271733878, 3285377520) respectively. We will start with this as a message digest. 0x is how you write - Hexadecimal numbers in Python + hexadecimal numbers in Python """ self.data = data self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0] @@ -90,7 +92,7 @@ class SHA1Hash: For each block, the variable h that was initialized is copied to a,b,c,d,e and these 5 variables a,b,c,d,e undergo several changes. After all the blocks are processed, these 5 variables are pairwise added to h ie a to h[0], b to h[1] - and so on. This h becomes our final hash which is returned. + and so on. This h becomes our final hash which is returned. """ self.padded_data = self.padding() self.blocks = self.split_blocks() @@ -135,7 +137,7 @@ def test_sha1_hash(): def main(): """ Provides option 'string' or 'file' to take input and prints the calculated SHA1 - hash. unittest.main() has been commented because we probably don't want to run + hash. unittest.main() has been commented out because we probably don't want to run the test each time. 
""" # unittest.main() diff --git a/maths/pi_generator.py b/maths/pi_generator.py index dcd218aae..addd92174 100644 --- a/maths/pi_generator.py +++ b/maths/pi_generator.py @@ -3,60 +3,53 @@ def calculate_pi(limit: int) -> str: https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80 Leibniz Formula for Pi - The Leibniz formula is the special case arctan 1 = 1/4 Pi . + The Leibniz formula is the special case arctan(1) = pi / 4. Leibniz's formula converges extremely slowly: it exhibits sublinear convergence. Convergence (https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Convergence) We cannot try to prove against an interrupted, uncompleted generation. https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Unusual_behaviour - The errors can in fact be predicted; - but those calculations also approach infinity for accuracy. + The errors can in fact be predicted, but those calculations also approach infinity + for accuracy. - Our output will always be a string since we can defintely store all digits in there. - For simplicity' sake, let's just compare against known values and since our outpit - is a string, we need to convert to float. + Our output will be a string so that we can definitely store all digits. 
>>> import math >>> float(calculate_pi(15)) == math.pi True - Since we cannot predict errors or interrupt any infinite alternating - series generation since they approach infinity, - or interrupt any alternating series, we are going to need math.isclose() + Since we cannot predict errors or interrupt any infinite alternating series + generation since they approach infinity, or interrupt any alternating series, we'll + need math.isclose() >>> math.isclose(float(calculate_pi(50)), math.pi) True - >>> math.isclose(float(calculate_pi(100)), math.pi) True - Since math.pi-constant contains only 16 digits, here some test with preknown values: + Since math.pi contains only 16 digits, here are some tests with known values: >>> calculate_pi(50) '3.14159265358979323846264338327950288419716939937510' >>> calculate_pi(80) '3.14159265358979323846264338327950288419716939937510582097494459230781640628620899' - - To apply the Leibniz formula for calculating pi, - the variables q, r, t, k, n, and l are used for the iteration process. """ + # Variables used for the iteration process q = 1 r = 0 t = 1 k = 1 n = 3 l = 3 + decimal = limit counter = 0 result = "" - """ - We will avoid using yield since we otherwise get a Generator-Object, - which we can't just compare against anything. 
We would have to make a list out of it - after the generation, so we will just stick to plain return logic: - """ + # We can't compare against anything if we make a generator, + # so we'll stick with plain return logic while counter != decimal + 1: if 4 * q + r - t < n * t: result += str(n) diff --git a/maths/radians.py b/maths/radians.py index 465467a3b..b8ac61cb1 100644 --- a/maths/radians.py +++ b/maths/radians.py @@ -3,7 +3,7 @@ from math import pi def radians(degree: float) -> float: """ - Coverts the given angle from degrees to radians + Converts the given angle from degrees to radians https://en.wikipedia.org/wiki/Radian >>> radians(180) @@ -16,7 +16,7 @@ def radians(degree: float) -> float: 1.9167205845401725 >>> from math import radians as math_radians - >>> all(abs(radians(i)-math_radians(i)) <= 0.00000001 for i in range(-2, 361)) + >>> all(abs(radians(i) - math_radians(i)) <= 1e-8 for i in range(-2, 361)) True """ diff --git a/maths/square_root.py b/maths/square_root.py index 2cbf14bea..4462ccb75 100644 --- a/maths/square_root.py +++ b/maths/square_root.py @@ -19,14 +19,13 @@ def get_initial_point(a: float) -> float: def square_root_iterative( - a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001 + a: float, max_iter: int = 9999, tolerance: float = 1e-14 ) -> float: """ - Square root is aproximated using Newtons method. + Square root approximated using Newton's method. https://en.wikipedia.org/wiki/Newton%27s_method - >>> all(abs(square_root_iterative(i)-math.sqrt(i)) <= .00000000000001 - ... 
for i in range(500)) + >>> all(abs(square_root_iterative(i) - math.sqrt(i)) <= 1e-14 for i in range(500)) True >>> square_root_iterative(-1) diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index f5ec156f3..f2e88fe7b 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -2,7 +2,7 @@ - - - - - -- - - - - - - - - - - - - - - - - - - - - - - Name - - CNN - Convolution Neural Network For Photo Recognizing Goal - - Recognize Handing Writing Word Photo - Detail:Total 5 layers neural network + Detail: Total 5 layers neural network * Convolution layer * Pooling layer * Input layer layer of BP @@ -24,7 +24,7 @@ class CNN: self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2 ): """ - :param conv1_get: [a,c,d],size, number, step of convolution kernel + :param conv1_get: [a,c,d], size, number, step of convolution kernel :param size_p1: pooling size :param bp_num1: units number of flatten layer :param bp_num2: units number of hidden layer @@ -71,7 +71,7 @@ class CNN: with open(save_path, "wb") as f: pickle.dump(model_dic, f) - print(f"Model saved: {save_path}") + print(f"Model saved: {save_path}") @classmethod def read_model(cls, model_path): @@ -210,7 +210,7 @@ class CNN: def train( self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool ): - # model traning + # model training print("----------------------Start Training-------------------------") print((" - - Shape: Train_Data ", np.shape(datas_train))) print((" - - Shape: Teach_Data ", np.shape(datas_teach))) diff --git a/neural_network/gan.py_tf b/neural_network/gan.py_tf index deb062c48..9c6e1c05b 100644 --- a/neural_network/gan.py_tf +++ b/neural_network/gan.py_tf @@ -158,7 +158,7 @@ if __name__ == "__main__": # G_b2 = np.random.normal(size=(784),scale=(1. / np.sqrt(784 / 2.))) *0.002 G_b7 = np.zeros(784) - # 3. For Adam Optimzier + # 3. 
For Adam Optimizer v1, m1 = 0, 0 v2, m2 = 0, 0 v3, m3 = 0, 0 diff --git a/other/graham_scan.py b/other/graham_scan.py index 2eadb4e56..3f11d40f1 100644 --- a/other/graham_scan.py +++ b/other/graham_scan.py @@ -1,5 +1,5 @@ """ -This is a pure Python implementation of the merge-insertion sort algorithm +This is a pure Python implementation of the Graham scan algorithm Source: https://en.wikipedia.org/wiki/Graham_scan For doctests run following command: @@ -142,8 +142,8 @@ def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]: stack.append(sorted_points[0]) stack.append(sorted_points[1]) stack.append(sorted_points[2]) - # In any ways, the first 3 points line are towards left. - # Because we sort them the angle from minx, miny. + # The first 3 points lines are towards the left because we sort them by their angle + # from minx, miny. current_direction = Direction.left for i in range(3, len(sorted_points)): @@ -164,7 +164,7 @@ def graham_scan(points: list[tuple[int, int]]) -> list[tuple[int, int]]: break elif current_direction == Direction.right: # If the straight line is towards right, - # every previous points on those straigh line is not convex hull. + # every previous points on that straight line is not convex hull. stack.pop() if next_direction == Direction.right: stack.pop() diff --git a/other/linear_congruential_generator.py b/other/linear_congruential_generator.py index c016310f9..c7de15b94 100644 --- a/other/linear_congruential_generator.py +++ b/other/linear_congruential_generator.py @@ -8,9 +8,9 @@ class LinearCongruentialGenerator: A pseudorandom number generator. """ - # The default value for **seed** is the result of a function call which is not + # The default value for **seed** is the result of a function call, which is not # normally recommended and causes ruff to raise a B008 error. 
However, in this case, - # it is accptable because `LinearCongruentialGenerator.__init__()` will only be + # it is acceptable because `LinearCongruentialGenerator.__init__()` will only be # called once per instance and it ensures that each instance will generate a unique # sequence of numbers. diff --git a/other/password.py b/other/password.py index 9a6161af8..1ce0d5231 100644 --- a/other/password.py +++ b/other/password.py @@ -63,11 +63,12 @@ def random_characters(chars_incl, i): pass # Put your code here... -# This Will Check Whether A Given Password Is Strong Or Not -# It Follows The Rule that Length Of Password Should Be At Least 8 Characters -# And At Least 1 Lower, 1 Upper, 1 Number And 1 Special Character def is_strong_password(password: str, min_length: int = 8) -> bool: """ + This will check whether a given password is strong or not. The password must be at + least as long as the provided minimum length, and it must contain at least 1 + lowercase letter, 1 uppercase letter, 1 number and 1 special character. 
+ >>> is_strong_password('Hwea7$2!') True >>> is_strong_password('Sh0r1') @@ -81,7 +82,6 @@ def is_strong_password(password: str, min_length: int = 8) -> bool: """ if len(password) < min_length: - # Your Password must be at least 8 characters long return False upper = any(char in ascii_uppercase for char in password) @@ -90,8 +90,6 @@ def is_strong_password(password: str, min_length: int = 8) -> bool: spec_char = any(char in punctuation for char in password) return upper and lower and num and spec_char - # Passwords should contain UPPERCASE, lowerase - # numbers, and special characters def main(): @@ -104,7 +102,7 @@ def main(): "Alternative Password generated:", alternative_password_generator(chars_incl, length), ) - print("[If you are thinking of using this passsword, You better save it.]") + print("[If you are thinking of using this password, You better save it.]") if __name__ == "__main__": diff --git a/physics/speed_of_sound.py b/physics/speed_of_sound.py index a4658366a..3fa952cdb 100644 --- a/physics/speed_of_sound.py +++ b/physics/speed_of_sound.py @@ -2,39 +2,35 @@ Title : Calculating the speed of sound Description : - The speed of sound (c) is the speed that a sound wave travels - per unit time (m/s). During propagation, the sound wave propagates - through an elastic medium. Its SI unit is meter per second (m/s). + The speed of sound (c) is the speed that a sound wave travels per unit time (m/s). + During propagation, the sound wave propagates through an elastic medium. - Only longitudinal waves can propagate in liquids and gas other then - solid where they also travel in transverse wave. The following Algo- - rithem calculates the speed of sound in fluid depanding on the bulk - module and the density of the fluid. + Sound propagates as longitudinal waves in liquids and gases and as transverse waves + in solids. This file calculates the speed of sound in a fluid based on its bulk + module and density. 
- Equation for calculating speed od sound in fluid: - c_fluid = (K_s*p)**0.5 + Equation for the speed of sound in a fluid: + c_fluid = sqrt(K_s / p) c_fluid: speed of sound in fluid K_s: isentropic bulk modulus p: density of fluid - - Source : https://en.wikipedia.org/wiki/Speed_of_sound """ def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float: """ - This method calculates the speed of sound in fluid - - This is calculated from the other two provided values - Examples: - Example 1 --> Water 20°C: bulk_moduls= 2.15MPa, density=998kg/m³ - Example 2 --> Murcery 20°: bulk_moduls= 28.5MPa, density=13600kg/m³ + Calculates the speed of sound in a fluid from its density and bulk modulus - >>> speed_of_sound_in_a_fluid(bulk_modulus=2.15*10**9, density=998) + Examples: + Example 1 --> Water 20°C: bulk_modulus= 2.15MPa, density=998kg/m³ + Example 2 --> Mercury 20°C: bulk_modulus= 28.5MPa, density=13600kg/m³ + + >>> speed_of_sound_in_a_fluid(bulk_modulus=2.15e9, density=998) 1467.7563207952705 - >>> speed_of_sound_in_a_fluid(bulk_modulus=28.5*10**9, density=13600) + >>> speed_of_sound_in_a_fluid(bulk_modulus=28.5e9, density=13600) 1447.614670861731 """ diff --git a/project_euler/problem_035/sol1.py b/project_euler/problem_035/sol1.py index 17a4e9088..644c992ed 100644 --- a/project_euler/problem_035/sol1.py +++ b/project_euler/problem_035/sol1.py @@ -11,18 +11,18 @@ There are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73 How many circular primes are there below one million? To solve this problem in an efficient manner, we will first mark all the primes -below 1 million using the Seive of Eratosthenes. Then, out of all these primes, -we will rule out the numbers which contain an even digit. After this we will +below 1 million using the Sieve of Eratosthenes. Then, out of all these primes, +we will rule out the numbers which contain an even digit. 
After this we will generate each circular combination of the number and check if all are prime. """ from __future__ import annotations -seive = [True] * 1000001 +sieve = [True] * 1000001 i = 2 while i * i <= 1000000: - if seive[i]: + if sieve[i]: for j in range(i * i, 1000001, i): - seive[j] = False + sieve[j] = False i += 1 @@ -36,7 +36,7 @@ def is_prime(n: int) -> bool: >>> is_prime(25363) False """ - return seive[n] + return sieve[n] def contains_an_even_digit(n: int) -> bool: diff --git a/project_euler/problem_135/sol1.py b/project_euler/problem_135/sol1.py index d71a0439c..ac91fa4e2 100644 --- a/project_euler/problem_135/sol1.py +++ b/project_euler/problem_135/sol1.py @@ -1,28 +1,22 @@ """ Project Euler Problem 135: https://projecteuler.net/problem=135 -Given the positive integers, x, y, and z, -are consecutive terms of an arithmetic progression, -the least value of the positive integer, n, -for which the equation, +Given the positive integers, x, y, and z, are consecutive terms of an arithmetic +progression, the least value of the positive integer, n, for which the equation, x2 − y2 − z2 = n, has exactly two solutions is n = 27: 342 − 272 − 202 = 122 − 92 − 62 = 27 -It turns out that n = 1155 is the least value -which has exactly ten solutions. +It turns out that n = 1155 is the least value which has exactly ten solutions. -How many values of n less than one million -have exactly ten distinct solutions? +How many values of n less than one million have exactly ten distinct solutions? -Taking x,y,z of the form a+d,a,a-d respectively, -the given equation reduces to a*(4d-a)=n. -Calculating no of solutions for every n till 1 million by fixing a -,and n must be multiple of a. -Total no of steps=n*(1/1+1/2+1/3+1/4..+1/n) -,so roughly O(nlogn) time complexity. - +Taking x, y, z of the form a + d, a, a - d respectively, the given equation reduces to +a * (4d - a) = n. +Calculating no of solutions for every n till 1 million by fixing a, and n must be a +multiple of a. 
Total no of steps = n * (1/1 + 1/2 + 1/3 + 1/4 + ... + 1/n), so roughly +O(nlogn) time complexity. """ @@ -42,15 +36,15 @@ def solution(limit: int = 1000000) -> int: for first_term in range(1, limit): for n in range(first_term, limit, first_term): common_difference = first_term + n / first_term - if common_difference % 4: # d must be divisble by 4 + if common_difference % 4: # d must be divisible by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference - ): # since x,y,z are positive integers - frequency[n] += 1 # so z>0 and a>d ,also 4d 0, a > d and 4d < a count = sum(1 for x in frequency[1:limit] if x == 10) diff --git a/project_euler/problem_493/sol1.py b/project_euler/problem_493/sol1.py index c9879a528..4d96c6c32 100644 --- a/project_euler/problem_493/sol1.py +++ b/project_euler/problem_493/sol1.py @@ -9,7 +9,7 @@ Give your answer with nine digits after the decimal point (a.bcdefghij). This combinatorial problem can be solved by decomposing the problem into the following steps: -1. Calculate the total number of possible picking cominations +1. Calculate the total number of possible picking combinations [combinations := binom_coeff(70, 20)] 2. 
Calculate the number of combinations with one colour missing [missing := binom_coeff(60, 20)] diff --git a/pyproject.toml b/pyproject.toml index f9091fb85..75da7a045 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -130,5 +130,5 @@ omit = [".env/*"] sort = "Cover" [tool.codespell] -ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,secant,som,sur,tim,zar" +ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,zar" skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" From fa077e6703758afcae4f19347a4388b9230d568f Mon Sep 17 00:00:00 2001 From: hollowcrust <72879387+hollowcrust@users.noreply.github.com> Date: Sun, 8 Oct 2023 16:58:48 +0800 Subject: [PATCH 053/306] Add doctests, type hints; fix bug for dynamic_programming/minimum_partition.py (#10012) * Add doctests, type hints; fix bug * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/minimum_partition.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py index 3daa9767f..e6188cb33 100644 --- a/dynamic_programming/minimum_partition.py +++ b/dynamic_programming/minimum_partition.py @@ -3,13 +3,25 @@ Partition a set into two subsets such that the difference of subset sums is mini """ -def find_min(arr): +def find_min(arr: list[int]) -> int: + """ + >>> find_min([1, 2, 3, 4, 5]) + 1 + >>> find_min([5, 5, 5, 5, 5]) + 5 + >>> find_min([5, 5, 5, 5]) + 0 + >>> find_min([3]) + 3 + >>> find_min([]) + 0 + """ n = len(arr) s = sum(arr) dp = [[False for x in range(s + 1)] for y in range(n + 1)] - for i in range(1, n + 1): + for i in range(n + 1): dp[i][0] = True for 
i in range(1, s + 1): @@ -17,7 +29,7 @@ def find_min(arr): for i in range(1, n + 1): for j in range(1, s + 1): - dp[i][j] = dp[i][j - 1] + dp[i][j] = dp[i - 1][j] if arr[i - 1] <= j: dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]] @@ -28,3 +40,9 @@ def find_min(arr): break return diff + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 937ce83b150f0a217c7fa63c75a095534ae8bfeb Mon Sep 17 00:00:00 2001 From: Om Alve Date: Sun, 8 Oct 2023 16:35:01 +0530 Subject: [PATCH 054/306] Added fractionated_morse_cipher (#9442) * Added fractionated_morse_cipher * Added return type hint for main function * Added doctest for main * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replaced main function * changed the references section Co-authored-by: Christian Clauss * removed repetitive datatype hint in the docstring Co-authored-by: Christian Clauss * changed dictionary comprehension variable names to something more compact Co-authored-by: Christian Clauss * Update fractionated_morse_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/fractionated_morse_cipher.py | 167 +++++++++++++++++++++++++++ 1 file changed, 167 insertions(+) create mode 100644 ciphers/fractionated_morse_cipher.py diff --git a/ciphers/fractionated_morse_cipher.py b/ciphers/fractionated_morse_cipher.py new file mode 100644 index 000000000..c1d5dc6d5 --- /dev/null +++ b/ciphers/fractionated_morse_cipher.py @@ -0,0 +1,167 @@ +""" +Python program for the Fractionated Morse Cipher. + +The Fractionated Morse cipher first converts the plaintext to Morse code, +then enciphers fixed-size blocks of Morse code back to letters. +This procedure means plaintext letters are mixed into the ciphertext letters, +making it more secure than substitution ciphers. 
+ +http://practicalcryptography.com/ciphers/fractionated-morse-cipher/ +""" +import string + +MORSE_CODE_DICT = { + "A": ".-", + "B": "-...", + "C": "-.-.", + "D": "-..", + "E": ".", + "F": "..-.", + "G": "--.", + "H": "....", + "I": "..", + "J": ".---", + "K": "-.-", + "L": ".-..", + "M": "--", + "N": "-.", + "O": "---", + "P": ".--.", + "Q": "--.-", + "R": ".-.", + "S": "...", + "T": "-", + "U": "..-", + "V": "...-", + "W": ".--", + "X": "-..-", + "Y": "-.--", + "Z": "--..", + " ": "", +} + +# Define possible trigrams of Morse code +MORSE_COMBINATIONS = [ + "...", + "..-", + "..x", + ".-.", + ".--", + ".-x", + ".x.", + ".x-", + ".xx", + "-..", + "-.-", + "-.x", + "--.", + "---", + "--x", + "-x.", + "-x-", + "-xx", + "x..", + "x.-", + "x.x", + "x-.", + "x--", + "x-x", + "xx.", + "xx-", + "xxx", +] + +# Create a reverse dictionary for Morse code +REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()} + + +def encode_to_morse(plaintext: str) -> str: + """Encode a plaintext message into Morse code. + + Args: + plaintext: The plaintext message to encode. + + Returns: + The Morse code representation of the plaintext message. + + Example: + >>> encode_to_morse("defend the east") + '-..x.x..-.x.x-.x-..xx-x....x.xx.x.-x...x-' + """ + return "x".join([MORSE_CODE_DICT.get(letter.upper(), "") for letter in plaintext]) + + +def encrypt_fractionated_morse(plaintext: str, key: str) -> str: + """Encrypt a plaintext message using Fractionated Morse Cipher. + + Args: + plaintext: The plaintext message to encrypt. + key: The encryption key. + + Returns: + The encrypted ciphertext. 
+ + Example: + >>> encrypt_fractionated_morse("defend the east","Roundtable") + 'ESOAVVLJRSSTRX' + + """ + morse_code = encode_to_morse(plaintext) + key = key.upper() + string.ascii_uppercase + key = "".join(sorted(set(key), key=key.find)) + + # Ensure morse_code length is a multiple of 3 + padding_length = 3 - (len(morse_code) % 3) + morse_code += "x" * padding_length + + fractionated_morse_dict = {v: k for k, v in zip(key, MORSE_COMBINATIONS)} + fractionated_morse_dict["xxx"] = "" + encrypted_text = "".join( + [ + fractionated_morse_dict[morse_code[i : i + 3]] + for i in range(0, len(morse_code), 3) + ] + ) + return encrypted_text + + +def decrypt_fractionated_morse(ciphertext: str, key: str) -> str: + """Decrypt a ciphertext message encrypted with Fractionated Morse Cipher. + + Args: + ciphertext: The ciphertext message to decrypt. + key: The decryption key. + + Returns: + The decrypted plaintext message. + + Example: + >>> decrypt_fractionated_morse("ESOAVVLJRSSTRX","Roundtable") + 'DEFEND THE EAST' + """ + key = key.upper() + string.ascii_uppercase + key = "".join(sorted(set(key), key=key.find)) + + inverse_fractionated_morse_dict = dict(zip(key, MORSE_COMBINATIONS)) + morse_code = "".join( + [inverse_fractionated_morse_dict.get(letter, "") for letter in ciphertext] + ) + decrypted_text = "".join( + [REVERSE_DICT[code] for code in morse_code.split("x")] + ).strip() + return decrypted_text + + +if __name__ == "__main__": + """ + Example usage of Fractionated Morse Cipher. 
+ """ + plaintext = "defend the east" + print("Plain Text:", plaintext) + key = "ROUNDTABLE" + + ciphertext = encrypt_fractionated_morse(plaintext, key) + print("Encrypted:", ciphertext) + + decrypted_text = decrypt_fractionated_morse(ciphertext, key) + print("Decrypted:", decrypted_text) From 08d394126c9d46fc9d227a0dc1e343ad1fa70679 Mon Sep 17 00:00:00 2001 From: Kausthub Kannan Date: Sun, 8 Oct 2023 21:18:22 +0530 Subject: [PATCH 055/306] Changed Mish Activation Function to use Softplus (#10111) --- neural_network/activation_functions/mish.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/neural_network/activation_functions/mish.py b/neural_network/activation_functions/mish.py index e4f98307f..e51655df8 100644 --- a/neural_network/activation_functions/mish.py +++ b/neural_network/activation_functions/mish.py @@ -7,6 +7,7 @@ https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Mish """ import numpy as np +from softplus import softplus def mish(vector: np.ndarray) -> np.ndarray: @@ -30,7 +31,7 @@ def mish(vector: np.ndarray) -> np.ndarray: array([-0.00092952, -0.15113318, 0.33152014, -0.04745745]) """ - return vector * np.tanh(np.log(1 + np.exp(vector))) + return vector * np.tanh(softplus(vector)) if __name__ == "__main__": From 6860daea60a512b202481bd5dd00d6534e162b77 Mon Sep 17 00:00:00 2001 From: Aarya Balwadkar <142713127+AaryaBalwadkar@users.noreply.github.com> Date: Sun, 8 Oct 2023 21:23:38 +0530 Subject: [PATCH 056/306] Made Changes shifted CRT, modular division to maths directory (#10084) --- {blockchain => maths}/chinese_remainder_theorem.py | 0 {blockchain => maths}/modular_division.py | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename {blockchain => maths}/chinese_remainder_theorem.py (100%) rename {blockchain => maths}/modular_division.py (100%) diff --git a/blockchain/chinese_remainder_theorem.py b/maths/chinese_remainder_theorem.py similarity index 100% rename from blockchain/chinese_remainder_theorem.py rename to 
maths/chinese_remainder_theorem.py diff --git a/blockchain/modular_division.py b/maths/modular_division.py similarity index 100% rename from blockchain/modular_division.py rename to maths/modular_division.py From 81b29066d206217cb689fe2c9c8d530a1aa66cbe Mon Sep 17 00:00:00 2001 From: Arnav Kohli <95236897+THEGAMECHANGER416@users.noreply.github.com> Date: Sun, 8 Oct 2023 21:34:43 +0530 Subject: [PATCH 057/306] Created folder for losses in Machine_Learning (#9969) * Created folder for losses in Machine_Learning * Update binary_cross_entropy.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update mean_squared_error.py * Update binary_cross_entropy.py * Update mean_squared_error.py * Update binary_cross_entropy.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update mean_squared_error.py * Update binary_cross_entropy.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update mean_squared_error.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binary_cross_entropy.py * Update mean_squared_error.py * Update binary_cross_entropy.py * Update mean_squared_error.py * Update machine_learning/losses/binary_cross_entropy.py Co-authored-by: Christian Clauss * Update machine_learning/losses/mean_squared_error.py Co-authored-by: Christian Clauss * Update machine_learning/losses/binary_cross_entropy.py Co-authored-by: Christian Clauss * Update mean_squared_error.py * Update machine_learning/losses/mean_squared_error.py Co-authored-by: Tianyi Zheng * Update binary_cross_entropy.py * Update mean_squared_error.py * Update binary_cross_entropy.py * Update mean_squared_error.py * Update mean_squared_error.py * Update binary_cross_entropy.py * renamed: losses -> loss_functions * updated 2 files * [pre-commit.ci] auto fixes from pre-commit.com hooks for 
more information, see https://pre-commit.ci * Update mean_squared_error.py * Update mean_squared_error.py * Update binary_cross_entropy.py * Update mean_squared_error.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng --- .../loss_functions/binary_cross_entropy.py | 59 +++++++++++++++++++ .../loss_functions/mean_squared_error.py | 51 ++++++++++++++++ 2 files changed, 110 insertions(+) create mode 100644 machine_learning/loss_functions/binary_cross_entropy.py create mode 100644 machine_learning/loss_functions/mean_squared_error.py diff --git a/machine_learning/loss_functions/binary_cross_entropy.py b/machine_learning/loss_functions/binary_cross_entropy.py new file mode 100644 index 000000000..4ebca7f21 --- /dev/null +++ b/machine_learning/loss_functions/binary_cross_entropy.py @@ -0,0 +1,59 @@ +""" +Binary Cross-Entropy (BCE) Loss Function + +Description: +Quantifies dissimilarity between true labels (0 or 1) and predicted probabilities. +It's widely used in binary classification tasks. + +Formula: +BCE = -Σ(y_true * log(y_pred) + (1 - y_true) * log(1 - y_pred)) + +Source: +[Wikipedia - Cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) +""" + +import numpy as np + + +def binary_cross_entropy( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 +) -> float: + """ + Calculate the BCE Loss between true labels and predicted probabilities. + + Parameters: + - y_true: True binary labels (0 or 1). + - y_pred: Predicted probabilities for class 1. + - epsilon: Small constant to avoid numerical instability. + + Returns: + - bce_loss: Binary Cross-Entropy Loss. 
+ + Example Usage: + >>> true_labels = np.array([0, 1, 1, 0, 1]) + >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) + >>> binary_cross_entropy(true_labels, predicted_probs) + 0.2529995012327421 + >>> true_labels = np.array([0, 1, 1, 0, 1]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> binary_cross_entropy(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + # Clip predicted probabilities to avoid log(0) and log(1) + y_pred = np.clip(y_pred, epsilon, 1 - epsilon) + + # Calculate binary cross-entropy loss + bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)) + + # Take the mean over all samples + return np.mean(bce_loss) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/machine_learning/loss_functions/mean_squared_error.py b/machine_learning/loss_functions/mean_squared_error.py new file mode 100644 index 000000000..d2b0e1e15 --- /dev/null +++ b/machine_learning/loss_functions/mean_squared_error.py @@ -0,0 +1,51 @@ +""" +Mean Squared Error (MSE) Loss Function + +Description: +MSE measures the mean squared difference between true values and predicted values. +It serves as a measure of the model's accuracy in regression tasks. + +Formula: +MSE = (1/n) * Σ(y_true - y_pred)^2 + +Source: +[Wikipedia - Mean squared error](https://en.wikipedia.org/wiki/Mean_squared_error) +""" + +import numpy as np + + +def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the Mean Squared Error (MSE) between two arrays. + + Parameters: + - y_true: The true values (ground truth). + - y_pred: The predicted values. + + Returns: + - mse: The Mean Squared Error between y_true and y_pred. 
+ + Example usage: + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> mean_squared_error(true_values, predicted_values) + 0.028000000000000032 + >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> mean_squared_error(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + squared_errors = (y_true - y_pred) ** 2 + return np.mean(squared_errors) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a12b07f352d51af1cb86c14f865cf2b18aba3ea1 Mon Sep 17 00:00:00 2001 From: Kausthub Kannan Date: Sun, 8 Oct 2023 21:38:37 +0530 Subject: [PATCH 058/306] Added Squareplus Activation Function (#9977) * Added Squareplus Activation Function * Added parameter beta to the function * Fixed Squareplus Function * Update neural_network/activation_functions/squareplus.py --------- Co-authored-by: Tianyi Zheng --- .../activation_functions/squareplus.py | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 neural_network/activation_functions/squareplus.py diff --git a/neural_network/activation_functions/squareplus.py b/neural_network/activation_functions/squareplus.py new file mode 100644 index 000000000..40fa800d6 --- /dev/null +++ b/neural_network/activation_functions/squareplus.py @@ -0,0 +1,38 @@ +""" +Squareplus Activation Function + +Use Case: Squareplus designed to enhance positive values and suppress negative values. +For more detailed information, you can refer to the following link: +https://en.wikipedia.org/wiki/Rectifier_(neural_networks)#Squareplus +""" + +import numpy as np + + +def squareplus(vector: np.ndarray, beta: float) -> np.ndarray: + """ + Implements the SquarePlus activation function. 
+ + Parameters: + vector (np.ndarray): The input array for the SquarePlus activation. + beta (float): size of the curved region + + Returns: + np.ndarray: The input array after applying the SquarePlus activation. + + Formula: f(x) = ( x + sqrt(x^2 + b) ) / 2 + + Examples: + >>> squareplus(np.array([2.3, 0.6, -2, -3.8]), beta=2) + array([2.5 , 1.06811457, 0.22474487, 0.12731349]) + + >>> squareplus(np.array([-9.2, -0.3, 0.45, -4.56]), beta=3) + array([0.0808119 , 0.72891979, 1.11977651, 0.15893419]) + """ + return (vector + np.sqrt(vector**2 + beta)) / 2 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From e89ae55d8e157cb7c6c3f855188a0fde29083c35 Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Sun, 8 Oct 2023 21:40:41 +0530 Subject: [PATCH 059/306] Create strip.py (#10011) * Create strip.py * Update strip.py --- strings/strip.py | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 strings/strip.py diff --git a/strings/strip.py b/strings/strip.py new file mode 100644 index 000000000..d4f901f0c --- /dev/null +++ b/strings/strip.py @@ -0,0 +1,33 @@ +def strip(user_string: str, characters: str = " \t\n\r") -> str: + """ + Remove leading and trailing characters (whitespace by default) from a string. + + Args: + user_string (str): The input string to be stripped. + characters (str, optional): Optional characters to be removed + (default is whitespace). + + Returns: + str: The stripped string. 
+ + Examples: + >>> strip(" hello ") + 'hello' + >>> strip("...world...", ".") + 'world' + >>> strip("123hello123", "123") + 'hello' + >>> strip("") + '' + """ + + start = 0 + end = len(user_string) + + while start < end and user_string[start] in characters: + start += 1 + + while end > start and user_string[end - 1] in characters: + end -= 1 + + return user_string[start:end] From 982bc2735872592d036c20389859071f36b13469 Mon Sep 17 00:00:00 2001 From: Kosuri L Indu <118645569+kosuri-indu@users.noreply.github.com> Date: Sun, 8 Oct 2023 22:37:02 +0530 Subject: [PATCH 060/306] add : Best time to buy and sell stock program under GREEDY methods (#10114) * to add best_time_stock program * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update best_time_to_buy_and_sell_stock.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../best_time_to_buy_and_sell_stock.py | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 greedy_methods/best_time_to_buy_and_sell_stock.py diff --git a/greedy_methods/best_time_to_buy_and_sell_stock.py b/greedy_methods/best_time_to_buy_and_sell_stock.py new file mode 100644 index 000000000..4aea19172 --- /dev/null +++ b/greedy_methods/best_time_to_buy_and_sell_stock.py @@ -0,0 +1,42 @@ +""" +Given a list of stock prices calculate the maximum profit that can be made from a +single buy and sell of one share of stock. We only allowed to complete one buy +transaction and one sell transaction but must buy before we sell. + +Example : prices = [7, 1, 5, 3, 6, 4] +max_profit will return 5 - which is by buying at price 1 and selling at price 6. + +This problem can be solved using the concept of "GREEDY ALGORITHM". + +We iterate over the price array once, keeping track of the lowest price point +(buy) and the maximum profit we can get at each point. 
def max_profit(prices: list[int]) -> int:
    """
    Return the maximum profit obtainable from one buy followed by one sell.

    Greedy single pass: track the cheapest price seen so far and the best
    profit achievable by selling at the current price.

    >>> max_profit([7, 1, 5, 3, 6, 4])
    5
    >>> max_profit([7, 6, 4, 3, 1])
    0
    """
    if not prices:
        return 0

    cheapest_so_far = prices[0]
    best_profit = 0

    for quote in prices:
        if quote < cheapest_so_far:
            # A new low: any future sale measures profit from here.
            cheapest_so_far = quote
        elif quote - cheapest_so_far > best_profit:
            # Selling today beats every sale considered before.
            best_profit = quote - cheapest_so_far

    return best_profit
) = (5*4*3*2*1)/[(3*2*1)*(2*1)] =(5*4*3)/(3*2*1) so running a loop k times will reduce the time complexity to O(k) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/combinations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/combinations.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/maths/combinations.py b/maths/combinations.py index a2324012c..6e9e1a807 100644 --- a/maths/combinations.py +++ b/maths/combinations.py @@ -1,7 +1,6 @@ """ https://en.wikipedia.org/wiki/Combination """ -from math import factorial def combinations(n: int, k: int) -> int: @@ -35,7 +34,11 @@ def combinations(n: int, k: int) -> int: # to calculate a factorial of a negative number, which is not possible if n < k or k < 0: raise ValueError("Please enter positive integers for n and k where n >= k") - return factorial(n) // (factorial(k) * factorial(n - k)) + res = 1 + for i in range(k): + res *= n - i + res //= i + 1 + return res if __name__ == "__main__": From c8f6f79f8038ef090a396725c80fa77d9186fb4b Mon Sep 17 00:00:00 2001 From: Siddharth Warrier <117698635+siddwarr@users.noreply.github.com> Date: Mon, 9 Oct 2023 01:10:14 +0530 Subject: [PATCH 062/306] Power of 4 (#9505) * added power_of_4 * updated power_of_4 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated power_of_4 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated power_of_4 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated power_of_4 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated power_of_4 * 
def power_of_4(number: int) -> bool:
    """
    Return True if ``number`` is a power of 4, False otherwise.

    Bit-manipulation approach: a power of four must be a power of two
    (exactly one set bit), and that single set bit must sit at an even
    bit index, i.e. the bit length (1, 3, 5, ...) is odd.

    :param number: positive integer to test
    :raises TypeError: if ``number`` is not an integer
    :raises ValueError: if ``number`` is not positive

    >>> power_of_4(1)
    True
    >>> power_of_4(2)
    False
    >>> power_of_4(4)
    True
    >>> power_of_4(8)
    False
    >>> power_of_4(64)
    True
    >>> power_of_4(0)
    Traceback (most recent call last):
        ...
    ValueError: number must be positive
    >>> power_of_4(1.2)
    Traceback (most recent call last):
        ...
    TypeError: number must be an integer
    """
    if not isinstance(number, int):
        raise TypeError("number must be an integer")
    if number <= 0:
        raise ValueError("number must be positive")
    if number & (number - 1):
        # More than one set bit: not even a power of two.
        return False
    # Powers of 4 (1, 4, 16, ...) have bit lengths 1, 3, 5, ...: always odd.
    return number.bit_length() % 2 == 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def median_of_five(arr: list) -> int:
    """
    Return the median of ``arr`` (intended for groups of at most five items).

    :param arr: values to take the median of
    :return: the middle element of the sorted values

    >>> median_of_five([2, 4, 5, 7, 899])
    5
    >>> median_of_five([5, 7, 899, 54, 32])
    32
    >>> median_of_five([5, 4, 3, 2])
    4
    >>> median_of_five([3, 5, 7, 10, 2])
    5
    """
    return sorted(arr)[len(arr) // 2]


def median_of_medians(arr: list) -> int:
    """
    Return a pivot candidate: the recursive median of the medians of
    consecutive groups of (at most) five elements.

    Bug fix: the previous implementation had its two slice branches swapped,
    so whenever a full group of five remained it took the median of the
    *entire* remaining array (``arr[i:]``) instead of a five-element chunk,
    breaking the groups-of-five invariant the algorithm's worst-case
    guarantee rests on.  The groups are now proper chunks of five.

    :param arr: the data to pick a pivot for
    :return: median of medians of the input array

    >>> median_of_medians([2, 4, 5, 7, 899, 54, 32])
    54
    >>> median_of_medians([5, 7, 899, 54, 32])
    32
    >>> median_of_medians([5, 4, 3, 2])
    4
    >>> median_of_medians([3, 5, 7, 10, 2, 12])
    12
    """
    if len(arr) <= 5:
        return median_of_five(arr)
    # Median of each consecutive group of at most five elements.
    medians = [median_of_five(arr[i : i + 5]) for i in range(0, len(arr), 5)]
    return median_of_medians(medians)


def quick_select(arr: list, target: int) -> int:
    """
    Return the element of rank ``target`` (1-based) in ``arr``, i.e. the
    value that would sit at index ``target - 1`` if ``arr`` were sorted.

    Partitions around a median-of-medians pivot and recurses into the side
    containing the requested rank.

    :param arr: the data to be searched (a list)
    :param target: the 1-based rank to be searched for
    :return: the element of rank ``target``, or -1 for an out-of-range rank

    >>> quick_select([2, 4, 5, 7, 899, 54, 32], 5)
    32
    >>> quick_select([2, 4, 5, 7, 899, 54, 32], 1)
    2
    >>> quick_select([5, 4, 3, 2], 2)
    3
    >>> quick_select([3, 5, 7, 10, 2, 12], 3)
    5
    >>> quick_select([3, 5, 7], 0)
    -1
    """
    # Ranks are 1-based; reject anything outside [1, len(arr)].  (The old
    # code only rejected target > len(arr); target < 1 crashed with an
    # IndexError deep in the recursion.)
    if not 1 <= target <= len(arr):
        return -1

    pivot = median_of_medians(arr)
    smaller = []
    larger = []
    pivot_seen = False
    for value in arr:
        if value < pivot:
            smaller.append(value)
        elif value > pivot:
            larger.append(value)
        elif not pivot_seen:
            # Keep exactly one copy of the pivot out of both partitions.
            pivot_seen = True
        else:
            larger.append(value)

    pivot_rank = len(smaller) + 1
    if pivot_rank == target:
        return pivot
    if pivot_rank > target:
        return quick_select(smaller, target)
    return quick_select(larger, target - pivot_rank)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo call, now guarded so importing the module stays side-effect free.
    print(median_of_five([5, 4, 3, 2]))
Numbers](maths/gcd_of_n_numbers.py) @@ -723,6 +722,7 @@ * Activation Functions * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) + * [Mish](neural_network/activation_functions/mish.py) * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) * [Sigmoid Linear Unit](neural_network/activation_functions/sigmoid_linear_unit.py) @@ -748,6 +748,7 @@ * [Linear Congruential Generator](other/linear_congruential_generator.py) * [Lru Cache](other/lru_cache.py) * [Magicdiamondpattern](other/magicdiamondpattern.py) + * [Majority Vote Algorithm](other/majority_vote_algorithm.py) * [Maximum Subsequence](other/maximum_subsequence.py) * [Nested Brackets](other/nested_brackets.py) * [Number Container System](other/number_container_system.py) diff --git a/maths/binary_exponentiation_3.py b/maths/binary_exponentiation_2.py similarity index 100% rename from maths/binary_exponentiation_3.py rename to maths/binary_exponentiation_2.py From 2d02500332533bb314f91675a3c30ea05bd52b5a Mon Sep 17 00:00:00 2001 From: halfhearted <99018821+Arunsiva003@users.noreply.github.com> Date: Mon, 9 Oct 2023 01:14:49 +0530 Subject: [PATCH 065/306] equilibrium index in an array (#9856) * equilibrium index in an array * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * equilibrium index in an array * equilibrium index in an array * equilibrium index in an array removed type in docstring --------- Co-authored-by: ArunSiva Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../arrays/equilibrium_index_in_array.py | 59 +++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 data_structures/arrays/equilibrium_index_in_array.py 
diff --git a/data_structures/arrays/equilibrium_index_in_array.py b/data_structures/arrays/equilibrium_index_in_array.py new file mode 100644 index 000000000..4099896d2 --- /dev/null +++ b/data_structures/arrays/equilibrium_index_in_array.py @@ -0,0 +1,59 @@ +""" +Find the Equilibrium Index of an Array. +Reference: https://www.geeksforgeeks.org/equilibrium-index-of-an-array/ + +Python doctests can be run with the following command: +python -m doctest -v equilibrium_index.py + +Given a sequence arr[] of size n, this function returns +an equilibrium index (if any) or -1 if no equilibrium index exists. + +The equilibrium index of an array is an index such that the sum of +elements at lower indexes is equal to the sum of elements at higher indexes. + + + +Example Input: +arr = [-7, 1, 5, 2, -4, 3, 0] +Output: 3 + +""" + + +def equilibrium_index(arr: list[int], size: int) -> int: + """ + Find the equilibrium index of an array. + + Args: + arr : The input array of integers. + size : The size of the array. + + Returns: + int: The equilibrium index or -1 if no equilibrium index exists. 
+ + Examples: + >>> equilibrium_index([-7, 1, 5, 2, -4, 3, 0], 7) + 3 + >>> equilibrium_index([1, 2, 3, 4, 5], 5) + -1 + >>> equilibrium_index([1, 1, 1, 1, 1], 5) + 2 + >>> equilibrium_index([2, 4, 6, 8, 10, 3], 6) + -1 + """ + total_sum = sum(arr) + left_sum = 0 + + for i in range(size): + total_sum -= arr[i] + if left_sum == total_sum: + return i + left_sum += arr[i] + + return -1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 66e4ea6a621cccabd6116f1543432899a4411daa Mon Sep 17 00:00:00 2001 From: Anshu Sharma <142900182+AnshuSharma111@users.noreply.github.com> Date: Mon, 9 Oct 2023 01:47:22 +0530 Subject: [PATCH 066/306] Consolidated two scripts reverse_letters.py and reverse_long_words.py into one (#10140) * Conolidated two scripts reverse_letters.py and reverse_long_words.py into one because of similar functionality * Added a new line to accomodate characters without going over 88 char limit * fixed grammar to pass pre-commit * Changed faulty test case entirely to pass pre commit * fixed a test case which was wrong --------- Co-authored-by: Keyboard-1 <142900182+Keyboard-1@users.noreply.github.com> --- DIRECTORY.md | 1 - strings/reverse_letters.py | 27 ++++++++++++++++----------- strings/reverse_long_words.py | 21 --------------------- 3 files changed, 16 insertions(+), 33 deletions(-) delete mode 100644 strings/reverse_long_words.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 55b270624..b1a23a239 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1197,7 +1197,6 @@ * [Rabin Karp](strings/rabin_karp.py) * [Remove Duplicate](strings/remove_duplicate.py) * [Reverse Letters](strings/reverse_letters.py) - * [Reverse Long Words](strings/reverse_long_words.py) * [Reverse Words](strings/reverse_words.py) * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) diff --git a/strings/reverse_letters.py b/strings/reverse_letters.py index 10b8a6d72..4f73f816b 100644 --- 
def reverse_letters(sentence: str, length: int = 0) -> str:
    """
    Reverse every word in ``sentence`` that is strictly longer than
    ``length`` characters, keeping word order and spacing-by-single-space.

    With the default ``length`` of 0, every word is reversed.

    >>> reverse_letters("Hey wollef sroirraw", 3)
    'Hey fellow warriors'
    >>> reverse_letters("nohtyP is nohtyP", 2)
    'Python is Python'
    >>> reverse_letters("1 12 123 1234 54321 654321", 0)
    '1 21 321 4321 12345 123456'
    >>> reverse_letters("racecar")
    'racecar'
    """
    transformed = []
    for word in sentence.split():
        # Only words longer than the threshold get reversed.
        transformed.append(word[::-1] if len(word) > length else word)
    return " ".join(transformed)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_letters("Hey wollef sroirraw"))
"""
A Sophie Germain prime is any prime p for which 2p + 1 is also prime;
the companion number 2p + 1 is called a safe prime.

Examples of Germain primes include: 2, 3, 5, 11, 23
Their corresponding safe primes:    5, 7, 11, 23, 47
https://en.wikipedia.org/wiki/Safe_and_Sophie_Germain_primes
"""

from maths.prime_check import is_prime


def is_germain_prime(number: int) -> bool:
    """Return True when both ``number`` and ``2 * number + 1`` are prime.

    >>> is_germain_prime(3)
    True
    >>> is_germain_prime(11)
    True
    >>> is_germain_prime(4)
    False
    >>> is_germain_prime(23)
    True
    >>> is_germain_prime(13)
    False
    >>> is_germain_prime(20)
    False
    >>> is_germain_prime('abc')
    Traceback (most recent call last):
        ...
    TypeError: Input value must be a positive integer. Input value: abc
    """
    if isinstance(number, int) and number >= 1:
        return is_prime(number) and is_prime(2 * number + 1)
    msg = f"Input value must be a positive integer. Input value: {number}"
    raise TypeError(msg)


def is_safe_prime(number: int) -> bool:
    """Return True when both ``number`` and ``(number - 1) / 2`` are prime.

    The smallest safe prime is 5, whose Germain prime is 2.

    >>> is_safe_prime(5)
    True
    >>> is_safe_prime(11)
    True
    >>> is_safe_prime(1)
    False
    >>> is_safe_prime(2)
    False
    >>> is_safe_prime(3)
    False
    >>> is_safe_prime(47)
    True
    >>> is_safe_prime('abc')
    Traceback (most recent call last):
        ...
    TypeError: Input value must be a positive integer. Input value: abc
    """
    if not isinstance(number, int) or number < 1:
        msg = f"Input value must be a positive integer. Input value: {number}"
        raise TypeError(msg)
    # (number - 1) must be even for (number - 1) / 2 to be a whole number.
    if (number - 1) % 2 != 0:
        return False
    return is_prime(number) and is_prime((number - 1) // 2)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
"""
Soboleva modified hyperbolic tangent (SMHT) activation function.

Applies the four-parameter SMHT function element-wise to a vector.
More details about the activation function can be found on:
https://en.wikipedia.org/wiki/Soboleva_modified_hyperbolic_tangent
"""


import numpy as np


def soboleva_modified_hyperbolic_tangent(
    vector: np.ndarray, a_value: float, b_value: float, c_value: float, d_value: float
) -> np.ndarray:
    """
    Apply the Soboleva modified hyperbolic tangent element-wise:

        smht(x) = (exp(a * x) - exp(-b * x)) / (exp(c * x) + exp(-d * x))

    Parameters:
        vector (ndarray): A vector that consists of numeric values
        a_value (float): parameter a of the equation
        b_value (float): parameter b of the equation
        c_value (float): parameter c of the equation
        d_value (float): parameter d of the equation

    Returns:
        vector (ndarray): Input array after applying the SMHT function

    >>> vector = np.array([5.4, -2.4, 6.3, -5.23, 3.27, 0.56])
    >>> soboleva_modified_hyperbolic_tangent(vector, 0.2, 0.4, 0.6, 0.8)
    array([ 0.11075085, -0.28236685,  0.07861169, -0.1180085 ,  0.22999056,
            0.1566043 ])
    """
    # Evaluate the two exponential sums element-wise, then divide.
    top = np.exp(a_value * vector) - np.exp(-b_value * vector)
    bottom = np.exp(c_value * vector) + np.exp(-d_value * vector)
    return top / bottom


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""
Binary step activation function.

The neuron is activated (outputs 1) when the input is positive or 0,
and deactivated (outputs 0) otherwise.

It's a simple activation function which is mentioned in this wikipedia
article: https://en.wikipedia.org/wiki/Activation_function
"""


import numpy as np


def binary_step(vector: np.ndarray) -> np.ndarray:
    """
    Apply the binary step function element-wise.

    Parameters:
        vector (ndarray): A vector that consists of numeric values

    Returns:
        vector (ndarray): 1 where the input is non-negative, 0 elsewhere

    >>> vector = np.array([-1.2, 0, 2, 1.45, -3.7, 0.3])
    >>> binary_step(vector)
    array([0, 1, 1, 1, 0, 1])
    """
    # The boolean mask (vector >= 0) cast to int yields exactly 0/1.
    return (vector >= 0).astype(int)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from dataclasses import dataclass


@dataclass
class ListNode:
    # Singly linked list node: a payload and an optional successor.
    val: int = 0
    next_node: "ListNode | None" = None


def is_palindrome(head: "ListNode | None") -> bool:
    """
    Check if a linked list is a palindrome.

    Splits the list at its midpoint, reverses the back half in place and
    compares it against the front half (the input list is modified).

    Args:
        head: The head of the linked list.

    Returns:
        bool: True if the linked list is a palindrome, False otherwise.

    Examples:
        >>> is_palindrome(None)
        True

        >>> is_palindrome(ListNode(1))
        True

        >>> is_palindrome(ListNode(1, ListNode(2)))
        False

        >>> is_palindrome(ListNode(1, ListNode(2, ListNode(1))))
        True

        >>> is_palindrome(ListNode(1, ListNode(2, ListNode(2, ListNode(1)))))
        True
    """
    if not head:
        return True

    # Locate the midpoint with a fast/slow pointer pair.
    fast: "ListNode | None" = head.next_node
    slow: "ListNode | None" = head
    while fast and fast.next_node:
        fast = fast.next_node.next_node
        slow = slow.next_node if slow else None
    if slow:
        # slow is always reached here; the check only satisfies mypy.
        second = slow.next_node
        slow.next_node = None  # detach the front half from the back half

    # Reverse the back half in place.
    reversed_half: "ListNode | None" = None
    while second:
        following = second.next_node
        second.next_node = reversed_half
        reversed_half = second
        second = following

    # The reversed back half has the same number of nodes as the front
    # half, or one fewer; compare pairwise.
    while reversed_half and head:
        if reversed_half.val != head.val:
            return False
        reversed_half = reversed_half.next_node
        head = head.next_node
    return True


def is_palindrome_stack(head: "ListNode | None") -> bool:
    """
    Check if a linked list is a palindrome using a stack.

    Args:
        head (ListNode): The head of the linked list.

    Returns:
        bool: True if the linked list is a palindrome, False otherwise.

    Examples:
        >>> is_palindrome_stack(None)
        True

        >>> is_palindrome_stack(ListNode(1))
        True

        >>> is_palindrome_stack(ListNode(1, ListNode(2)))
        False

        >>> is_palindrome_stack(ListNode(1, ListNode(2, ListNode(1))))
        True

        >>> is_palindrome_stack(ListNode(1, ListNode(2, ListNode(2, ListNode(1)))))
        True
    """
    if not head or not head.next_node:
        return True

    # 1. Walk a fast/slow pointer pair to find the midpoint.
    slow: "ListNode | None" = head
    fast: "ListNode | None" = head
    while fast and fast.next_node:
        fast = fast.next_node.next_node
        slow = slow.next_node if slow else None

    if slow:
        # 2. Push the values of the second half onto a stack...
        stack = [slow.val]
        while slow.next_node:
            slow = slow.next_node
            stack.append(slow.val)

        # 3. ...and pop them against the values read from the front.
        cursor: "ListNode | None" = head
        while stack and cursor:
            if stack.pop() != cursor.val:
                return False
            cursor = cursor.next_node
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Diamond (pyramid) pattern helpers built from '*' characters.


# Function to build the upper half of the diamond (pyramid)
def floyd(n):
    """
    Build the upper half of a diamond pattern with '*' characters.

    Args:
        n (int): Size of the pattern.

    Examples:
        >>> floyd(3)
        '  * \\n * * \\n* * * \\n'

        >>> floyd(5)
        '    * \\n   * * \\n  * * * \\n * * * * \\n* * * * * \\n'
    """
    rows = ""
    for row in range(n):
        # Leading spaces centre the stars; each row gains one star.
        rows += " " * (n - row - 1) + "* " * (row + 1) + "\n"
    return rows


# Function to build the lower half of the diamond (pyramid)
def reverse_floyd(n):
    """
    Build the lower half of a diamond pattern with '*' characters.

    Args:
        n (int): Size of the pattern.

    Examples:
        >>> reverse_floyd(3)
        '* * * \\n * * \\n  * \\n   '

        >>> reverse_floyd(5)
        '* * * * * \\n * * * * \\n  * * * \\n   * * \\n    * \\n     '
    """
    rows = ""
    for stars in range(n, 0, -1):
        # Stars shrink row by row; the trailing spaces indent the next row.
        rows += "* " * stars + "\n" + " " * (n - stars + 1)
    return rows


# Function to build the complete diamond pattern of "*"
def pretty_print(n):
    """
    Build the complete diamond pattern with '*' characters.

    Args:
        n (int): Size of the pattern; non-positive sizes yield a placeholder.
    """
    if n <= 0:
        return " ... .... nothing printing :("
    upper_half = floyd(n)  # upper half
    lower_half = reverse_floyd(n)  # lower half
    return upper_half + lower_half


if __name__ == "__main__":
    import doctest

    doctest.testmod()
insertions(+), 131 deletions(-) diff --git a/blockchain/diophantine_equation.py b/blockchain/diophantine_equation.py index 22b0cad75..7110d9023 100644 --- a/blockchain/diophantine_equation.py +++ b/blockchain/diophantine_equation.py @@ -1,11 +1,13 @@ from __future__ import annotations +from maths.greatest_common_divisor import greatest_common_divisor + def diophantine(a: int, b: int, c: int) -> tuple[float, float]: """ Diophantine Equation : Given integers a,b,c ( at least one of a and b != 0), the diophantine equation a*x + b*y = c has a solution (where x and y are integers) - iff gcd(a,b) divides c. + iff greatest_common_divisor(a,b) divides c. GCD ( Greatest Common Divisor ) or HCF ( Highest Common Factor ) @@ -22,7 +24,7 @@ def diophantine(a: int, b: int, c: int) -> tuple[float, float]: assert ( c % greatest_common_divisor(a, b) == 0 - ) # greatest_common_divisor(a,b) function implemented below + ) # greatest_common_divisor(a,b) is in maths directory (d, x, y) = extended_gcd(a, b) # extended_gcd(a,b) function implemented below r = c / d return (r * x, r * y) @@ -69,32 +71,6 @@ def diophantine_all_soln(a: int, b: int, c: int, n: int = 2) -> None: print(x, y) -def greatest_common_divisor(a: int, b: int) -> int: - """ - Euclid's Lemma : d divides a and b, if and only if d divides a-b and b - - Euclid's Algorithm - - >>> greatest_common_divisor(7,5) - 1 - - Note : In number theory, two integers a and b are said to be relatively prime, - mutually prime, or co-prime if the only positive integer (factor) that - divides both of them is 1 i.e., gcd(a,b) = 1. 
- - >>> greatest_common_divisor(121, 11) - 11 - - """ - if a < b: - a, b = b, a - - while a % b != 0: - a, b = b, a % b - - return b - - def extended_gcd(a: int, b: int) -> tuple[int, int, int]: """ Extended Euclid's Algorithm : If d divides a and b and d = a*x + b*y for integers diff --git a/ciphers/affine_cipher.py b/ciphers/affine_cipher.py index cd1e33b88..10d16367c 100644 --- a/ciphers/affine_cipher.py +++ b/ciphers/affine_cipher.py @@ -1,6 +1,8 @@ import random import sys +from maths.greatest_common_divisor import gcd_by_iterative + from . import cryptomath_module as cryptomath SYMBOLS = ( @@ -26,7 +28,7 @@ def check_keys(key_a: int, key_b: int, mode: str) -> None: "Key A must be greater than 0 and key B must " f"be between 0 and {len(SYMBOLS) - 1}." ) - if cryptomath.gcd(key_a, len(SYMBOLS)) != 1: + if gcd_by_iterative(key_a, len(SYMBOLS)) != 1: sys.exit( f"Key A {key_a} and the symbol set size {len(SYMBOLS)} " "are not relatively prime. Choose a different key." @@ -76,7 +78,7 @@ def get_random_key() -> int: while True: key_b = random.randint(2, len(SYMBOLS)) key_b = random.randint(2, len(SYMBOLS)) - if cryptomath.gcd(key_b, len(SYMBOLS)) == 1 and key_b % len(SYMBOLS) != 0: + if gcd_by_iterative(key_b, len(SYMBOLS)) == 1 and key_b % len(SYMBOLS) != 0: return key_b * len(SYMBOLS) + key_b diff --git a/ciphers/cryptomath_module.py b/ciphers/cryptomath_module.py index 6f15f7b73..02e94e4b9 100644 --- a/ciphers/cryptomath_module.py +++ b/ciphers/cryptomath_module.py @@ -1,11 +1,8 @@ -def gcd(a: int, b: int) -> int: - while a != 0: - a, b = b % a, a - return b +from maths.greatest_common_divisor import gcd_by_iterative def find_mod_inverse(a: int, m: int) -> int: - if gcd(a, m) != 1: + if gcd_by_iterative(a, m) != 1: msg = f"mod inverse of {a!r} and {m!r} does not exist" raise ValueError(msg) u1, u2, u3 = 1, 0, a diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index b4424e822..1201fda90 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py 
@@ -39,19 +39,7 @@ import string import numpy - -def greatest_common_divisor(a: int, b: int) -> int: - """ - >>> greatest_common_divisor(4, 8) - 4 - >>> greatest_common_divisor(8, 4) - 4 - >>> greatest_common_divisor(4, 7) - 1 - >>> greatest_common_divisor(0, 10) - 10 - """ - return b if a == 0 else greatest_common_divisor(b % a, a) +from maths.greatest_common_divisor import greatest_common_divisor class HillCipher: diff --git a/ciphers/rsa_key_generator.py b/ciphers/rsa_key_generator.py index eedc73368..44970e8cb 100644 --- a/ciphers/rsa_key_generator.py +++ b/ciphers/rsa_key_generator.py @@ -2,6 +2,8 @@ import os import random import sys +from maths.greatest_common_divisor import gcd_by_iterative + from . import cryptomath_module, rabin_miller @@ -27,7 +29,7 @@ def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]: # Generate e that is relatively prime to (p - 1) * (q - 1) while True: e = random.randrange(2 ** (key_size - 1), 2 ** (key_size)) - if cryptomath_module.gcd(e, (p - 1) * (q - 1)) == 1: + if gcd_by_iterative(e, (p - 1) * (q - 1)) == 1: break # Calculate d that is mod inverse of e diff --git a/maths/carmichael_number.py b/maths/carmichael_number.py index c9c144759..81712520f 100644 --- a/maths/carmichael_number.py +++ b/maths/carmichael_number.py @@ -10,14 +10,7 @@ satisfies the following modular arithmetic condition: Examples of Carmichael Numbers: 561, 1105, ... 
https://en.wikipedia.org/wiki/Carmichael_number """ - - -def gcd(a: int, b: int) -> int: - if a < b: - return gcd(b, a) - if a % b == 0: - return b - return gcd(b, a % b) +from maths.greatest_common_divisor import greatest_common_divisor def power(x: int, y: int, mod: int) -> int: @@ -33,7 +26,7 @@ def power(x: int, y: int, mod: int) -> int: def is_carmichael_number(n: int) -> bool: b = 2 while b < n: - if gcd(b, n) == 1 and power(b, n - 1, n) != 1: + if greatest_common_divisor(b, n) == 1 and power(b, n - 1, n) != 1: return False b += 1 return True diff --git a/maths/least_common_multiple.py b/maths/least_common_multiple.py index 10cc63ac7..4f28da8ab 100644 --- a/maths/least_common_multiple.py +++ b/maths/least_common_multiple.py @@ -1,6 +1,8 @@ import unittest from timeit import timeit +from maths.greatest_common_divisor import greatest_common_divisor + def least_common_multiple_slow(first_num: int, second_num: int) -> int: """ @@ -20,26 +22,6 @@ def least_common_multiple_slow(first_num: int, second_num: int) -> int: return common_mult -def greatest_common_divisor(a: int, b: int) -> int: - """ - Calculate Greatest Common Divisor (GCD). - see greatest_common_divisor.py - >>> greatest_common_divisor(24, 40) - 8 - >>> greatest_common_divisor(1, 1) - 1 - >>> greatest_common_divisor(1, 800) - 1 - >>> greatest_common_divisor(11, 37) - 1 - >>> greatest_common_divisor(3, 5) - 1 - >>> greatest_common_divisor(16, 4) - 4 - """ - return b if a == 0 else greatest_common_divisor(b % a, a) - - def least_common_multiple_fast(first_num: int, second_num: int) -> int: """ Find the least common multiple of two numbers. 
diff --git a/maths/primelib.py b/maths/primelib.py index 28b5aee9d..cf01750cf 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -21,7 +21,6 @@ get_primes_between(pNumber1, pNumber2) is_even(number) is_odd(number) -gcd(number1, number2) // greatest common divisor kg_v(number1, number2) // least common multiple get_divisors(number) // all divisors of 'number' inclusive 1, number is_perfect_number(number) @@ -40,6 +39,8 @@ goldbach(number) // Goldbach's assumption from math import sqrt +from maths.greatest_common_divisor import gcd_by_iterative + def is_prime(number: int) -> bool: """ @@ -317,39 +318,6 @@ def goldbach(number): # ---------------------------------------------- -def gcd(number1, number2): - """ - Greatest common divisor - input: two positive integer 'number1' and 'number2' - returns the greatest common divisor of 'number1' and 'number2' - """ - - # precondition - assert ( - isinstance(number1, int) - and isinstance(number2, int) - and (number1 >= 0) - and (number2 >= 0) - ), "'number1' and 'number2' must been positive integer." - - rest = 0 - - while number2 != 0: - rest = number1 % number2 - number1 = number2 - number2 = rest - - # precondition - assert isinstance(number1, int) and ( - number1 >= 0 - ), "'number' must been from type int and positive" - - return number1 - - -# ---------------------------------------------------- - - def kg_v(number1, number2): """ Least common multiple @@ -567,14 +535,14 @@ def simplify_fraction(numerator, denominator): ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. 
- gcd_of_fraction = gcd(abs(numerator), abs(denominator)) + gcd_of_fraction = gcd_by_iterative(abs(numerator), abs(denominator)) # precondition assert ( isinstance(gcd_of_fraction, int) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) - ), "Error in function gcd(...,...)" + ), "Error in function gcd_by_iterative(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) diff --git a/project_euler/problem_005/sol2.py b/project_euler/problem_005/sol2.py index 1b3e5e130..4558e21fd 100644 --- a/project_euler/problem_005/sol2.py +++ b/project_euler/problem_005/sol2.py @@ -1,3 +1,5 @@ +from maths.greatest_common_divisor import greatest_common_divisor + """ Project Euler Problem 5: https://projecteuler.net/problem=5 @@ -16,23 +18,6 @@ References: """ -def greatest_common_divisor(x: int, y: int) -> int: - """ - Euclidean Greatest Common Divisor algorithm - - >>> greatest_common_divisor(0, 0) - 0 - >>> greatest_common_divisor(23, 42) - 1 - >>> greatest_common_divisor(15, 33) - 3 - >>> greatest_common_divisor(12345, 67890) - 15 - """ - - return x if y == 0 else greatest_common_divisor(y, x % y) - - def lcm(x: int, y: int) -> int: """ Least Common Multiple. From 53d78b9cc09021c8f65fae41f8b345304a88aedd Mon Sep 17 00:00:00 2001 From: Kausthub Kannan Date: Mon, 9 Oct 2023 20:03:47 +0530 Subject: [PATCH 073/306] Added Huber Loss Function (#10141) --- machine_learning/loss_functions/huber_loss.py | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 machine_learning/loss_functions/huber_loss.py diff --git a/machine_learning/loss_functions/huber_loss.py b/machine_learning/loss_functions/huber_loss.py new file mode 100644 index 000000000..202e013f2 --- /dev/null +++ b/machine_learning/loss_functions/huber_loss.py @@ -0,0 +1,52 @@ +""" +Huber Loss Function + +Description: +Huber loss function describes the penalty incurred by an estimation procedure. 
+It serves as a measure of the model's accuracy in regression tasks. + +Formula: +Huber Loss = if |y_true - y_pred| <= delta then 0.5 * (y_true - y_pred)^2 + else delta * |y_true - y_pred| - 0.5 * delta^2 + +Source: +[Wikipedia - Huber Loss](https://en.wikipedia.org/wiki/Huber_loss) +""" + +import numpy as np + + +def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float: + """ + Calculate the mean of Huber Loss. + + Parameters: + - y_true: The true values (ground truth). + - y_pred: The predicted values. + + Returns: + - huber_loss: The mean of Huber Loss between y_true and y_pred. + + Example usage: + >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102) + True + >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0]) + >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) + >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164) + True + """ + + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + huber_mse = 0.5 * (y_true - y_pred) ** 2 + huber_mae = delta * (np.abs(y_true - y_pred) - 0.5 * delta) + return np.where(np.abs(y_true - y_pred) <= delta, huber_mse, huber_mae).mean() + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c0da015b7d49f9f6e99fffd279f65c5605a0ebe1 Mon Sep 17 00:00:00 2001 From: Sai Harsha Kottapalli Date: Mon, 9 Oct 2023 20:49:05 +0530 Subject: [PATCH 074/306] Add DocTests to diffie.py (#10156) * diffie doctest * fix ut * update doctest --------- Co-authored-by: Harsha Kottapalli --- ciphers/diffie.py | 45 +++++++++++++++++++++++++++++++++------------ 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/ciphers/diffie.py b/ciphers/diffie.py index 4ff90be00..1e1e86899 100644 --- a/ciphers/diffie.py +++ b/ciphers/diffie.py @@ -1,11 +1,28 @@ from __future__ import annotations -def 
find_primitive(n: int) -> int | None: - for r in range(1, n): +def find_primitive(modulus: int) -> int | None: + """ + Find a primitive root modulo modulus, if one exists. + + Args: + modulus : The modulus for which to find a primitive root. + + Returns: + The primitive root if one exists, or None if there is none. + + Examples: + >>> find_primitive(7) # Modulo 7 has primitive root 3 + 3 + >>> find_primitive(11) # Modulo 11 has primitive root 2 + 2 + >>> find_primitive(8) == None # Modulo 8 has no primitive root + True + """ + for r in range(1, modulus): li = [] - for x in range(n - 1): - val = pow(r, x, n) + for x in range(modulus - 1): + val = pow(r, x, modulus) if val in li: break li.append(val) @@ -15,18 +32,22 @@ def find_primitive(n: int) -> int | None: if __name__ == "__main__": - q = int(input("Enter a prime number q: ")) - a = find_primitive(q) - if a is None: - print(f"Cannot find the primitive for the value: {a!r}") + import doctest + + doctest.testmod() + + prime = int(input("Enter a prime number q: ")) + primitive_root = find_primitive(prime) + if primitive_root is None: + print(f"Cannot find the primitive for the value: {primitive_root!r}") else: a_private = int(input("Enter private key of A: ")) - a_public = pow(a, a_private, q) + a_public = pow(primitive_root, a_private, prime) b_private = int(input("Enter private key of B: ")) - b_public = pow(a, b_private, q) + b_public = pow(primitive_root, b_private, prime) - a_secret = pow(b_public, a_private, q) - b_secret = pow(a_public, b_private, q) + a_secret = pow(b_public, a_private, prime) + b_secret = pow(a_public, b_private, prime) print("The key value generated by A is: ", a_secret) print("The key value generated by B is: ", b_secret) From ba828fe621d1f5623fffcf0014b243da3a6122fc Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Mon, 9 Oct 2023 20:46:38 +0500 Subject: [PATCH 075/306] test_digital_image_processing -> test_local_binary_pattern replacing a large 
image with a smaller one (#10161) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Replaced lena.jpg with lena_small.jpg to make tests faster. * Update digital_image_processing/test_digital_image_processing.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update test_digital_image_processing.py tests fail, I'll try an empty commit * Apply suggestions from code review * Update test_digital_image_processing.py added clarifications * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update test_digital_image_processing.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- .../test_digital_image_processing.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index 528b4bc3b..2e5630458 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -96,9 +96,16 @@ def test_nearest_neighbour( def test_local_binary_pattern(): - file_path = "digital_image_processing/image_data/lena.jpg" + # pull request 10161 before: + # "digital_image_processing/image_data/lena.jpg" + # after: "digital_image_processing/image_data/lena_small.jpg" - # Reading the image and converting it to grayscale. 
+ from os import getenv # Speed up our Continuous Integration tests + + file_name = "lena_small.jpg" if getenv("CI") else "lena.jpg" + file_path = f"digital_image_processing/image_data/{file_name}" + + # Reading the image and converting it to grayscale image = imread(file_path, 0) # Test for get_neighbors_pixel function() return not None From 844270c6e91387940e062a1522f58bde1026bb08 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 9 Oct 2023 19:42:07 +0200 Subject: [PATCH 076/306] Remove backslashes from is_palindrome.py (#10169) @SaiHarshaK Fixes https://github.com/TheAlgorithms/Python/pull/10081#discussion_r1349651289 --- data_structures/linked_list/is_palindrome.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/data_structures/linked_list/is_palindrome.py b/data_structures/linked_list/is_palindrome.py index 7d89f085c..f949d9a2f 100644 --- a/data_structures/linked_list/is_palindrome.py +++ b/data_structures/linked_list/is_palindrome.py @@ -147,9 +147,11 @@ def is_palindrome_dict(head: ListNode | None) -> bool: >>> is_palindrome_dict(ListNode(1, ListNode(2, ListNode(2, ListNode(1))))) True - >>> is_palindrome_dict(\ - ListNode(\ - 1, ListNode(2, ListNode(1, ListNode(3, ListNode(2, ListNode(1))))))) + >>> is_palindrome_dict( + ... ListNode( + ... 1, ListNode(2, ListNode(1, ListNode(3, ListNode(2, ListNode(1))))) + ... ) + ... ) False """ if not head or not head.next_node: From 5d0a46814e5b69f79d623187912c0f81ab5ab7a7 Mon Sep 17 00:00:00 2001 From: aryan1165 <111041731+aryan1165@users.noreply.github.com> Date: Tue, 10 Oct 2023 01:08:04 +0530 Subject: [PATCH 077/306] Added ciphers/permutation_cipher.py. (#9163) * Added permutation_cipher.py * Added type hints for parameters * Added doctest in functions * Update ciphers/permutation_cipher.py Ya i felt the same but held back because there is a implementation of transposition_cipher.py. But that's is different from the one i have implemented here. 
Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * Update ciphers/permutation_cipher.py Co-authored-by: Tianyi Zheng * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes --------- Co-authored-by: Tianyi Zheng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ciphers/permutation_cipher.py | 142 ++++++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 ciphers/permutation_cipher.py diff --git a/ciphers/permutation_cipher.py b/ciphers/permutation_cipher.py new file mode 100644 index 000000000..c3f3fd1f7 --- /dev/null +++ b/ciphers/permutation_cipher.py @@ -0,0 +1,142 @@ +""" +The permutation cipher, also called the transposition cipher, is a simple encryption +technique that rearranges the characters in a message based on a secret key. It +divides the message into blocks and applies a permutation to the characters within +each block according to the key. The key is a sequence of unique integers that +determine the order of character rearrangement. + +For more info: https://www.nku.edu/~christensen/1402%20permutation%20ciphers.pdf +""" +import random + + +def generate_valid_block_size(message_length: int) -> int: + """ + Generate a valid block size that is a factor of the message length. + + Args: + message_length (int): The length of the message. + + Returns: + int: A valid block size. 
+ + Example: + >>> random.seed(1) + >>> generate_valid_block_size(12) + 3 + """ + block_sizes = [ + block_size + for block_size in range(2, message_length + 1) + if message_length % block_size == 0 + ] + return random.choice(block_sizes) + + +def generate_permutation_key(block_size: int) -> list[int]: + """ + Generate a random permutation key of a specified block size. + + Args: + block_size (int): The size of each permutation block. + + Returns: + list[int]: A list containing a random permutation of digits. + + Example: + >>> random.seed(0) + >>> generate_permutation_key(4) + [2, 0, 1, 3] + """ + digits = list(range(block_size)) + random.shuffle(digits) + return digits + + +def encrypt( + message: str, key: list[int] | None = None, block_size: int | None = None +) -> tuple[str, list[int]]: + """ + Encrypt a message using a permutation cipher with block rearrangement using a key. + + Args: + message (str): The plaintext message to be encrypted. + key (list[int]): The permutation key for decryption. + block_size (int): The size of each permutation block. + + Returns: + tuple: A tuple containing the encrypted message and the encryption key. + + Example: + >>> encrypted_message, key = encrypt("HELLO WORLD") + >>> decrypted_message = decrypt(encrypted_message, key) + >>> decrypted_message + 'HELLO WORLD' + """ + message = message.upper() + message_length = len(message) + + if key is None or block_size is None: + block_size = generate_valid_block_size(message_length) + key = generate_permutation_key(block_size) + + encrypted_message = "" + + for i in range(0, message_length, block_size): + block = message[i : i + block_size] + rearranged_block = [block[digit] for digit in key] + encrypted_message += "".join(rearranged_block) + + return encrypted_message, key + + +def decrypt(encrypted_message: str, key: list[int]) -> str: + """ + Decrypt an encrypted message using a permutation cipher with block rearrangement. + + Args: + encrypted_message (str): The encrypted message. 
+ key (list[int]): The permutation key for decryption. + + Returns: + str: The decrypted plaintext message. + + Example: + >>> encrypted_message, key = encrypt("HELLO WORLD") + >>> decrypted_message = decrypt(encrypted_message, key) + >>> decrypted_message + 'HELLO WORLD' + """ + key_length = len(key) + decrypted_message = "" + + for i in range(0, len(encrypted_message), key_length): + block = encrypted_message[i : i + key_length] + original_block = [""] * key_length + for j, digit in enumerate(key): + original_block[digit] = block[j] + decrypted_message += "".join(original_block) + + return decrypted_message + + +def main() -> None: + """ + Driver function to pass message to get encrypted, then decrypted. + + Example: + >>> main() + Decrypted message: HELLO WORLD + """ + message = "HELLO WORLD" + encrypted_message, key = encrypt(message) + + decrypted_message = decrypt(encrypted_message, key) + print(f"Decrypted message: {decrypted_message}") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() From b0aa35c7b360f1d141705b97c89d51603a3461a6 Mon Sep 17 00:00:00 2001 From: Dean Bring Date: Mon, 9 Oct 2023 14:21:46 -0700 Subject: [PATCH 078/306] Added the Chebyshev distance function (#10144) * Added the Chebyshev distance function * Remove float cast and made error handling more precise --- maths/chebyshev_distance.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 maths/chebyshev_distance.py diff --git a/maths/chebyshev_distance.py b/maths/chebyshev_distance.py new file mode 100644 index 000000000..4801d3916 --- /dev/null +++ b/maths/chebyshev_distance.py @@ -0,0 +1,20 @@ +def chebyshev_distance(point_a: list[float], point_b: list[float]) -> float: + """ + This function calculates the Chebyshev distance (also known as the + Chessboard distance) between two n-dimensional points represented as lists. 
+ + https://en.wikipedia.org/wiki/Chebyshev_distance + + >>> chebyshev_distance([1.0, 1.0], [2.0, 2.0]) + 1.0 + >>> chebyshev_distance([1.0, 1.0, 9.0], [2.0, 2.0, -5.2]) + 14.2 + >>> chebyshev_distance([1.0], [2.0, 2.0]) + Traceback (most recent call last): + ... + ValueError: Both points must have the same dimension. + """ + if len(point_a) != len(point_b): + raise ValueError("Both points must have the same dimension.") + + return max(abs(a - b) for a, b in zip(point_a, point_b)) From 53638fcec4ff990ced9afb569c18b927df652596 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 23:38:32 +0200 Subject: [PATCH 079/306] [pre-commit.ci] pre-commit autoupdate (#10197) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.4.0 → v4.5.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.4.0...v4.5.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8a88dcc07..7340a0fd0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v4.5.0 hooks: - id: check-executables-have-shebangs - id: check-toml From b9a797f3d4f1a66da1e213bd92e08fa9cf6c3643 Mon Sep 17 00:00:00 2001 From: Dean Bring Date: Mon, 9 Oct 2023 16:00:37 -0700 Subject: [PATCH 080/306] Added the Minkowski distance function (#10143) * Added the Minkowski distance function * Made error handling more precise * Added note about floating point errors and corresponding doctest --- maths/minkowski_distance.py | 45 +++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 maths/minkowski_distance.py diff --git 
a/maths/minkowski_distance.py b/maths/minkowski_distance.py new file mode 100644 index 000000000..3237124e8 --- /dev/null +++ b/maths/minkowski_distance.py @@ -0,0 +1,45 @@ +def minkowski_distance( + point_a: list[float], + point_b: list[float], + order: int, +) -> float: + """ + This function calculates the Minkowski distance for a given order between + two n-dimensional points represented as lists. For the case of order = 1, + the Minkowski distance degenerates to the Manhattan distance. For + order = 2, the usual Euclidean distance is obtained. + + https://en.wikipedia.org/wiki/Minkowski_distance + + Note: due to floating point calculation errors the output of this + function may be inaccurate. + + >>> minkowski_distance([1.0, 1.0], [2.0, 2.0], 1) + 2.0 + >>> minkowski_distance([1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0], 2) + 8.0 + >>> import numpy as np + >>> np.isclose(5.0, minkowski_distance([5.0], [0.0], 3)) + True + >>> minkowski_distance([1.0], [2.0], -1) + Traceback (most recent call last): + ... + ValueError: The order must be greater than or equal to 1. + >>> minkowski_distance([1.0], [1.0, 2.0], 1) + Traceback (most recent call last): + ... + ValueError: Both points must have the same dimension. + """ + if order < 1: + raise ValueError("The order must be greater than or equal to 1.") + + if len(point_a) != len(point_b): + raise ValueError("Both points must have the same dimension.") + + return sum(abs(a - b) ** order for a, b in zip(point_a, point_b)) ** (1 / order) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7b996e2c221aa88b5688ea08f2bb3a391b5be2c6 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Tue, 10 Oct 2023 09:16:02 +0500 Subject: [PATCH 081/306] backtracking -> word_search - replacing the example in doctest (#10188) * Replacing the generator with numpy vector operations from lu_decomposition. 
* Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Replacing the example in doctest with a less resource-intensive example. --- backtracking/word_search.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/backtracking/word_search.py b/backtracking/word_search.py index c9d52012b..8a9b2f1b5 100644 --- a/backtracking/word_search.py +++ b/backtracking/word_search.py @@ -98,13 +98,7 @@ def word_exists(board: list[list[str]], word: str) -> bool: False >>> word_exists([["A"]], "A") True - >>> word_exists([["A","A","A","A","A","A"], - ... ["A","A","A","A","A","A"], - ... ["A","A","A","A","A","A"], - ... ["A","A","A","A","A","A"], - ... ["A","A","A","A","A","B"], - ... ["A","A","A","A","B","A"]], - ... "AAAAAAAAAAAAABB") + >>> word_exists([["B", "A", "A"], ["A", "A", "A"], ["A", "B", "A"]], "ABB") False >>> word_exists([["A"]], 123) Traceback (most recent call last): From 4f8fa3c44a29cafaed64a73588a309e88d1f3ded Mon Sep 17 00:00:00 2001 From: Md Mahiuddin <68785084+mahiuddin-dev@users.noreply.github.com> Date: Tue, 10 Oct 2023 10:19:40 +0600 Subject: [PATCH 082/306] TypeError for non-integer input (#9250) * type error check * remove str input * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/number_of_digits.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/maths/number_of_digits.py b/maths/number_of_digits.py index 86bc67f72..bb9c0d248 100644 --- a/maths/number_of_digits.py +++ b/maths/number_of_digits.py @@ -16,7 +16,15 @@ def num_digits(n: int) -> int: 1 >>> num_digits(-123456) 6 + >>> num_digits('123') # Raises a TypeError for non-integer input + Traceback (most recent call last): + ... 
+ TypeError: Input must be an integer """ + + if not isinstance(n, int): + raise TypeError("Input must be an integer") + digits = 0 n = abs(n) while True: @@ -42,7 +50,15 @@ def num_digits_fast(n: int) -> int: 1 >>> num_digits_fast(-123456) 6 + >>> num_digits('123') # Raises a TypeError for non-integer input + Traceback (most recent call last): + ... + TypeError: Input must be an integer """ + + if not isinstance(n, int): + raise TypeError("Input must be an integer") + return 1 if n == 0 else math.floor(math.log(abs(n), 10) + 1) @@ -61,7 +77,15 @@ def num_digits_faster(n: int) -> int: 1 >>> num_digits_faster(-123456) 6 + >>> num_digits('123') # Raises a TypeError for non-integer input + Traceback (most recent call last): + ... + TypeError: Input must be an integer """ + + if not isinstance(n, int): + raise TypeError("Input must be an integer") + return len(str(abs(n))) From 1b4c4e7db216305e059cc087c3f09bc6d3e17575 Mon Sep 17 00:00:00 2001 From: dimonalik <114773527+dimonalik@users.noreply.github.com> Date: Tue, 10 Oct 2023 07:34:36 +0300 Subject: [PATCH 083/306] Made problem explanation more clear (#9841) * Update minimum_steps_to_one.py Made the problem explanation more clear and readable * updating DIRECTORY.md * Apply suggestions from code review --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- dynamic_programming/minimum_steps_to_one.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dynamic_programming/minimum_steps_to_one.py b/dynamic_programming/minimum_steps_to_one.py index 8785027fb..68eaf56e2 100644 --- a/dynamic_programming/minimum_steps_to_one.py +++ b/dynamic_programming/minimum_steps_to_one.py @@ -1,7 +1,7 @@ """ YouTube Explanation: https://www.youtube.com/watch?v=f2xi3c1S95M -Given an integer n, return the minimum steps to 1 +Given an integer n, return the minimum steps from n to 1 AVAILABLE STEPS: * Decrement by 1 From 9c02f1220e571f2922855e245c5a92d4f2220f8a Mon 
Sep 17 00:00:00 2001 From: AkhilYadavPadala <142014008+AkhilYadavPadala@users.noreply.github.com> Date: Tue, 10 Oct 2023 10:43:32 +0530 Subject: [PATCH 084/306] seperation between description and docstrings (#9687) * seperation between description and docstrings * Update maths/factorial.py --------- Co-authored-by: sarayu sree Co-authored-by: Tianyi Zheng --- maths/factorial.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/maths/factorial.py b/maths/factorial.py index 18cacdef9..aaf90f384 100644 --- a/maths/factorial.py +++ b/maths/factorial.py @@ -1,4 +1,5 @@ -"""Factorial of a positive integer -- https://en.wikipedia.org/wiki/Factorial +""" +Factorial of a positive integer -- https://en.wikipedia.org/wiki/Factorial """ From f3acb52cadade9e7d012bf7f50cad32669b67b75 Mon Sep 17 00:00:00 2001 From: Paarth Goyal <138299656+pluto-tofu@users.noreply.github.com> Date: Tue, 10 Oct 2023 10:54:04 +0530 Subject: [PATCH 085/306] Added the algorithm to compute Reynolds number in the physics section (#9913) * added the algorithm to compute Reynolds number * fixed file name issue * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- physics/reynolds_number.py | 63 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 physics/reynolds_number.py diff --git a/physics/reynolds_number.py b/physics/reynolds_number.py new file mode 100644 index 000000000..dffe690f8 --- /dev/null +++ b/physics/reynolds_number.py @@ -0,0 +1,63 @@ +""" +Title : computing the Reynolds number to find + out the type of flow (laminar or turbulent) + +Reynolds number is a dimensionless quantity that is used to determine +the type of flow pattern as laminar or turbulent while flowing through a +pipe. Reynolds number is defined by the ratio of inertial forces to that of +viscous forces. 
+ +R = Inertial Forces / Viscous Forces +R = (ρ * V * D)/μ + +where : +ρ = Density of fluid (in Kg/m^3) +D = Diameter of pipe through which fluid flows (in m) +V = Velocity of flow of the fluid (in m/s) +μ = Viscosity of the fluid (in Ns/m^2) + +If the Reynolds number calculated is high (greater than 2000), then the +flow through the pipe is said to be turbulent. If Reynolds number is low +(less than 2000), the flow is said to be laminar. Numerically, these are +acceptable values, although in general the laminar and turbulent flows +are classified according to a range. Laminar flow falls below Reynolds +number of 1100 and turbulent falls in a range greater than 2200. +Laminar flow is the type of flow in which the fluid travels smoothly in +regular paths. Conversely, turbulent flow isn't smooth and follows an +irregular path with lots of mixing. + +Reference : https://byjus.com/physics/reynolds-number/ +""" + + +def reynolds_number( + density: float, velocity: float, diameter: float, viscosity: float +) -> float: + """ + >>> reynolds_number(900, 2.5, 0.05, 0.4) + 281.25 + >>> reynolds_number(450, 3.86, 0.078, 0.23) + 589.0695652173912 + >>> reynolds_number(234, -4.5, 0.3, 0.44) + 717.9545454545454 + >>> reynolds_number(-90, 2, 0.045, 1) + Traceback (most recent call last): + ... + ValueError: please ensure that density, diameter and viscosity are positive + >>> reynolds_number(0, 2, -0.4, -2) + Traceback (most recent call last): + ... 
+ ValueError: please ensure that density, diameter and viscosity are positive + """ + + if density <= 0 or diameter <= 0 or viscosity <= 0: + raise ValueError( + "please ensure that density, diameter and viscosity are positive" + ) + return (density * abs(velocity) * diameter) / viscosity + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 6d136036672072a2c4870da7741d4ad3026a7357 Mon Sep 17 00:00:00 2001 From: Vipin Karthic <143083087+vipinkarthic@users.noreply.github.com> Date: Tue, 10 Oct 2023 11:22:37 +0530 Subject: [PATCH 086/306] Fixes #9943 Added Doctests to binary_exponentiation_3.py (#10121) * Python mirror_formulae.py is added to the repository * Changes done after reading readme.md * Changes for running doctest on all platforms * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change 2 for Doctests * Changes for doctest 2 * updating DIRECTORY.md * Doctest whitespace error rectification to mirror_formulae.py * updating DIRECTORY.md * Adding Thermodynamic Work Done Formulae * Work done on/by body in a thermodynamic setting * updating DIRECTORY.md * updating DIRECTORY.md * Doctest adiition to binary_exponentiation_3.py * Change 1 * updating DIRECTORY.md * Rename binary_exponentiation_3.py to binary_exponentiation_2.py * updating DIRECTORY.md * updating DIRECTORY.md * Formatting --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Tianyi Zheng --- DIRECTORY.md | 26 +++++++++++++-- maths/binary_exponentiation_2.py | 57 +++++++++++++++++++------------- 2 files changed, 58 insertions(+), 25 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index b1a23a239..015efb3c7 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -54,13 +54,12 @@ * [Largest Pow Of Two Le Num](bit_manipulation/largest_pow_of_two_le_num.py) * 
[Missing Number](bit_manipulation/missing_number.py) * [Numbers Different Signs](bit_manipulation/numbers_different_signs.py) + * [Power Of 4](bit_manipulation/power_of_4.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) ## Blockchain - * [Chinese Remainder Theorem](blockchain/chinese_remainder_theorem.py) * [Diophantine Equation](blockchain/diophantine_equation.py) - * [Modular Division](blockchain/modular_division.py) ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) @@ -101,11 +100,13 @@ * [Diffie Hellman](ciphers/diffie_hellman.py) * [Elgamal Key Generator](ciphers/elgamal_key_generator.py) * [Enigma Machine2](ciphers/enigma_machine2.py) + * [Fractionated Morse Cipher](ciphers/fractionated_morse_cipher.py) * [Hill Cipher](ciphers/hill_cipher.py) * [Mixed Keyword Cypher](ciphers/mixed_keyword_cypher.py) * [Mono Alphabetic Ciphers](ciphers/mono_alphabetic_ciphers.py) * [Morse Code](ciphers/morse_code.py) * [Onepad Cipher](ciphers/onepad_cipher.py) + * [Permutation Cipher](ciphers/permutation_cipher.py) * [Playfair Cipher](ciphers/playfair_cipher.py) * [Polybius](ciphers/polybius.py) * [Porta Cipher](ciphers/porta_cipher.py) @@ -172,6 +173,7 @@ ## Data Structures * Arrays + * [Equilibrium Index In Array](data_structures/arrays/equilibrium_index_in_array.py) * [Median Two Array](data_structures/arrays/median_two_array.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) @@ -352,6 +354,7 @@ * [Smith Waterman](dynamic_programming/smith_waterman.py) * [Subset Generation](dynamic_programming/subset_generation.py) * [Sum Of Subset](dynamic_programming/sum_of_subset.py) + * [Trapped Water](dynamic_programming/trapped_water.py) * [Tribonacci](dynamic_programming/tribonacci.py) * [Viterbi](dynamic_programming/viterbi.py) * [Word Break](dynamic_programming/word_break.py) @@ -360,6 +363,7 @@ * [Apparent 
Power](electronics/apparent_power.py) * [Builtin Voltage](electronics/builtin_voltage.py) * [Carrier Concentration](electronics/carrier_concentration.py) + * [Charging Capacitor](electronics/charging_capacitor.py) * [Circular Convolution](electronics/circular_convolution.py) * [Coulombs Law](electronics/coulombs_law.py) * [Electric Conductivity](electronics/electric_conductivity.py) @@ -466,6 +470,8 @@ * [Test Min Spanning Tree Prim](graphs/tests/test_min_spanning_tree_prim.py) ## Greedy Methods + * [Best Time To Buy And Sell Stock](greedy_methods/best_time_to_buy_and_sell_stock.py) + * [Fractional Cover Problem](greedy_methods/fractional_cover_problem.py) * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) * [Gas Station](greedy_methods/gas_station.py) @@ -524,6 +530,10 @@ * Local Weighted Learning * [Local Weighted Learning](machine_learning/local_weighted_learning/local_weighted_learning.py) * [Logistic Regression](machine_learning/logistic_regression.py) + * Loss Functions + * [Binary Cross Entropy](machine_learning/loss_functions/binary_cross_entropy.py) + * [Huber Loss](machine_learning/loss_functions/huber_loss.py) + * [Mean Squared Error](machine_learning/loss_functions/mean_squared_error.py) * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) @@ -564,7 +574,9 @@ * [Carmichael Number](maths/carmichael_number.py) * [Catalan Number](maths/catalan_number.py) * [Ceil](maths/ceil.py) + * [Chebyshev Distance](maths/chebyshev_distance.py) * [Check Polygon](maths/check_polygon.py) + * [Chinese Remainder Theorem](maths/chinese_remainder_theorem.py) * [Chudnovsky Algorithm](maths/chudnovsky_algorithm.py) * [Collatz Sequence](maths/collatz_sequence.py) * [Combinations](maths/combinations.py) @@ -591,6 +603,7 @@ * [Gaussian](maths/gaussian.py) * 
[Gaussian Error Linear Unit](maths/gaussian_error_linear_unit.py) * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) + * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming Numbers](maths/hamming_numbers.py) @@ -618,7 +631,9 @@ * [Matrix Exponentiation](maths/matrix_exponentiation.py) * [Max Sum Sliding Window](maths/max_sum_sliding_window.py) * [Median Of Two Arrays](maths/median_of_two_arrays.py) + * [Minkowski Distance](maths/minkowski_distance.py) * [Mobius Function](maths/mobius_function.py) + * [Modular Division](maths/modular_division.py) * [Modular Exponential](maths/modular_exponential.py) * [Monte Carlo](maths/monte_carlo.py) * [Monte Carlo Dice](maths/monte_carlo_dice.py) @@ -720,12 +735,16 @@ ## Neural Network * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) * Activation Functions + * [Binary Step](neural_network/activation_functions/binary_step.py) * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Leaky Rectified Linear Unit](neural_network/activation_functions/leaky_rectified_linear_unit.py) * [Mish](neural_network/activation_functions/mish.py) * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) * [Sigmoid Linear Unit](neural_network/activation_functions/sigmoid_linear_unit.py) + * [Soboleva Modified Hyperbolic Tangent](neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py) + * [Softplus](neural_network/activation_functions/softplus.py) + * [Squareplus](neural_network/activation_functions/squareplus.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * 
[Perceptron](neural_network/perceptron.py) @@ -779,6 +798,7 @@ * [Newtons Second Law Of Motion](physics/newtons_second_law_of_motion.py) * [Photoelectric Effect](physics/photoelectric_effect.py) * [Potential Energy](physics/potential_energy.py) + * [Reynolds Number](physics/reynolds_number.py) * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) * [Speed Of Sound](physics/speed_of_sound.py) @@ -1101,6 +1121,7 @@ * [Interpolation Search](searches/interpolation_search.py) * [Jump Search](searches/jump_search.py) * [Linear Search](searches/linear_search.py) + * [Median Of Medians](searches/median_of_medians.py) * [Quick Select](searches/quick_select.py) * [Sentinel Linear Search](searches/sentinel_linear_search.py) * [Simple Binary Search](searches/simple_binary_search.py) @@ -1201,6 +1222,7 @@ * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) * [String Switch Case](strings/string_switch_case.py) + * [Strip](strings/strip.py) * [Text Justification](strings/text_justification.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py index 9cd143e09..edb6b66b2 100644 --- a/maths/binary_exponentiation_2.py +++ b/maths/binary_exponentiation_2.py @@ -1,17 +1,33 @@ """ -* Binary Exponentiation for Powers -* This is a method to find a^b in a time complexity of O(log b) -* This is one of the most commonly used methods of finding powers. -* Also useful in cases where solution to (a^b)%c is required, -* where a,b,c can be numbers over the computers calculation limits. 
-* Done using iteration, can also be done using recursion +Binary Exponentiation +This is a method to find a^b in O(log b) time complexity +This is one of the most commonly used methods of exponentiation +It's also useful when the solution to (a^b) % c is required because a, b, c may be +over the computer's calculation limits -* @author chinmoy159 -* @version 1.0 dated 10/08/2017 +Let's say you need to calculate a ^ b +- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2 +- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even +Once b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1 + +For modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c +Now apply RULE 1 or 2 as required + +@author chinmoy159 """ def b_expo(a: int, b: int) -> int: + """ + >>> b_expo(2, 10) + 1024 + >>> b_expo(9, 0) + 1 + >>> b_expo(0, 12) + 0 + >>> b_expo(4, 12) + 16777216 + """ res = 1 while b > 0: if b & 1: @@ -24,6 +40,16 @@ def b_expo(a: int, b: int) -> int: def b_expo_mod(a: int, b: int, c: int) -> int: + """ + >>> b_expo_mod(2, 10, 1000000007) + 1024 + >>> b_expo_mod(11, 13, 19) + 11 + >>> b_expo_mod(0, 19, 20) + 0 + >>> b_expo_mod(15, 5, 4) + 3 + """ res = 1 while b > 0: if b & 1: @@ -33,18 +59,3 @@ def b_expo_mod(a: int, b: int, c: int) -> int: b >>= 1 return res - - -""" -* Wondering how this method works ! -* It's pretty simple. -* Let's say you need to calculate a ^ b -* RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2 -* RULE 2 : IF b is ODD, then ---- a ^ b = a * (a ^ (b - 1)) :: where (b - 1) is even. -* Once b is even, repeat the process to get a ^ b -* Repeat the process till b = 1 OR b = 0, because a^1 = a AND a^0 = 1 -* -* As far as the modulo is concerned, -* the fact : (a*b) % c = ((a%c) * (b%c)) % c -* Now apply RULE 1 OR 2 whichever is required. 
-""" From 59fc0cefefce77718044eb797e2c33cf8a7e1f9a Mon Sep 17 00:00:00 2001 From: Arnav Kohli <95236897+THEGAMECHANGER416@users.noreply.github.com> Date: Tue, 10 Oct 2023 18:50:49 +0530 Subject: [PATCH 087/306] Added categorical_crossentropy loss function (#10152) --- .../categorical_cross_entropy.py | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 machine_learning/loss_functions/categorical_cross_entropy.py diff --git a/machine_learning/loss_functions/categorical_cross_entropy.py b/machine_learning/loss_functions/categorical_cross_entropy.py new file mode 100644 index 000000000..68f98902b --- /dev/null +++ b/machine_learning/loss_functions/categorical_cross_entropy.py @@ -0,0 +1,85 @@ +""" +Categorical Cross-Entropy Loss + +This function calculates the Categorical Cross-Entropy Loss between true class +labels and predicted class probabilities. + +Formula: +Categorical Cross-Entropy Loss = -Σ(y_true * ln(y_pred)) + +Resources: +- [Wikipedia - Cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) +""" + +import numpy as np + + +def categorical_cross_entropy( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 +) -> float: + """ + Calculate Categorical Cross-Entropy Loss between true class labels and + predicted class probabilities. + + Parameters: + - y_true: True class labels (one-hot encoded) as a NumPy array. + - y_pred: Predicted class probabilities as a NumPy array. + - epsilon: Small constant to avoid numerical instability. + + Returns: + - ce_loss: Categorical Cross-Entropy Loss as a floating-point number. 
+ + Example: + >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + 0.567395975254385 + + >>> y_true = np.array([[1, 0], [0, 1]]) + >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same shape. + + >>> y_true = np.array([[2, 0, 1], [1, 0, 0]]) + >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. + + >>> y_true = np.array([[1, 0, 1], [1, 0, 0]]) + >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. + + >>> y_true = np.array([[1, 0, 0], [0, 1, 0]]) + >>> y_pred = np.array([[0.9, 0.1, 0.1], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: Predicted probabilities must sum to approximately 1. 
+ """ + if y_true.shape != y_pred.shape: + raise ValueError("Input arrays must have the same shape.") + + if np.any((y_true != 0) & (y_true != 1)) or np.any(y_true.sum(axis=1) != 1): + raise ValueError("y_true must be one-hot encoded.") + + if not np.all(np.isclose(np.sum(y_pred, axis=1), 1, rtol=epsilon, atol=epsilon)): + raise ValueError("Predicted probabilities must sum to approximately 1.") + + # Clip predicted probabilities to avoid log(0) + y_pred = np.clip(y_pred, epsilon, 1) + + # Calculate categorical cross-entropy loss + return -np.sum(y_true * np.log(y_pred)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0b440285e813c54cda188eac278bda6fa4b1169f Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Tue, 10 Oct 2023 19:24:51 +0500 Subject: [PATCH 088/306] Gaussian_elemination - change to remove warning (#10221) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Removes the warning: Conversion of an array with ndim > 0 to a scalar is deprecated, and will error in future. 
Ensure you extract a single element from your array before performing this operation --- arithmetic_analysis/gaussian_elimination.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arithmetic_analysis/gaussian_elimination.py b/arithmetic_analysis/gaussian_elimination.py index 13f509a4f..a1a35131b 100644 --- a/arithmetic_analysis/gaussian_elimination.py +++ b/arithmetic_analysis/gaussian_elimination.py @@ -34,7 +34,7 @@ def retroactive_resolution( x: NDArray[float64] = np.zeros((rows, 1), dtype=float) for row in reversed(range(rows)): total = np.dot(coefficients[row, row + 1 :], x[row + 1 :]) - x[row, 0] = (vector[row] - total) / coefficients[row, row] + x[row, 0] = (vector[row][0] - total[0]) / coefficients[row, row] return x From 5be5d21bed4bb546c81b5771bebca336978111e7 Mon Sep 17 00:00:00 2001 From: hollowcrust <72879387+hollowcrust@users.noreply.github.com> Date: Wed, 11 Oct 2023 00:52:53 +0800 Subject: [PATCH 089/306] Add tests for infix_2_postfix() in infix_to_prefix_conversion.py (#10095) * Add doctests, exceptions, type hints and fix bug for infix_to_prefix_conversion.py Add doctests Add exceptions for expressions with invalid bracket positions Add type hints for functions Fix a bug on line 53 (57 in PR) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change type hints in infix_to_prefix_conversion.py * Remove printing trailing whitespace in the output table * Fix type hint errors * Fix doctests * Adjust table convention in output and doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add doctests for infix_2_postfix() * Update print_width * Update print_width * Fix the doctests --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../stacks/infix_to_prefix_conversion.py | 73 +++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git 
a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py index 6f6d5d57e..1127211d5 100644 --- a/data_structures/stacks/infix_to_prefix_conversion.py +++ b/data_structures/stacks/infix_to_prefix_conversion.py @@ -16,6 +16,39 @@ Enter an Infix Equation = a + b ^c def infix_2_postfix(infix): + """ + >>> infix_2_postfix("a+b^c") # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ---------------------------- + a | | a + + | + | a + b | + | ab + ^ | +^ | ab + c | +^ | abc + | + | abc^ + | | abc^+ + 'abc^+' + >>> infix_2_postfix("1*((-a)*2+b)") + Traceback (most recent call last): + ... + KeyError: '(' + >>> infix_2_postfix("") + Symbol | Stack | Postfix + ---------------------------- + '' + >>> infix_2_postfix("(()") # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ---------------------------- + ( | ( | + ( | (( | + ) | ( | + | | ( + '(' + >>> infix_2_postfix("())") + Traceback (most recent call last): + ... + IndexError: list index out of range + """ stack = [] post_fix = [] priority = { @@ -74,6 +107,42 @@ def infix_2_postfix(infix): def infix_2_prefix(infix): + """ + >>> infix_2_prefix("a+b^c") # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ---------------------------- + c | | c + ^ | ^ | c + b | ^ | cb + + | + | cb^ + a | + | cb^a + | | cb^a+ + '+a^bc' + + >>> infix_2_prefix("1*((-a)*2+b)") + Traceback (most recent call last): + ... + KeyError: '(' + + >>> infix_2_prefix('') + Symbol | Stack | Postfix + ---------------------------- + '' + + >>> infix_2_prefix('(()') + Traceback (most recent call last): + ... 
+ IndexError: list index out of range + + >>> infix_2_prefix('())') # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ---------------------------- + ( | ( | + ( | (( | + ) | ( | + | | ( + '(' + """ infix = list(infix[::-1]) # reverse the infix equation for i in range(len(infix)): @@ -88,6 +157,10 @@ def infix_2_prefix(infix): if __name__ == "__main__": + from doctest import testmod + + testmod() + Infix = input("\nEnter an Infix Equation = ") # Input an Infix equation Infix = "".join(Infix.split()) # Remove spaces from the input print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)") From 9a5a6c663cefb8cbc63329c27188f64462072a4c Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Wed, 11 Oct 2023 01:14:13 +0500 Subject: [PATCH 090/306] carmichael_number - add doctests (#10038) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Added doctests * Update carmichael_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update carmichael_number.py I make an empty commit to reset: tests are failing. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update carmichael_number.py Made changes taking into account the addition: from maths.greatest_common_divisor import greatest_common_divisor. Now instead of gcd it is used: greatest_common_divisor. * Update carmichael_number.py * Update carmichael_number.py * Update carmichael_number.py I added a check for 0 and negative numbers in the tests and the code itself. Simplified obtaining the final result. 
* Update carmichael_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/carmichael_number.py Co-authored-by: Tianyi Zheng * Update carmichael_number.py * Update carmichael_number.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/carmichael_number.py | 55 +++++++++++++++++++++++++++++++++----- 1 file changed, 49 insertions(+), 6 deletions(-) diff --git a/maths/carmichael_number.py b/maths/carmichael_number.py index 81712520f..08b5c70e8 100644 --- a/maths/carmichael_number.py +++ b/maths/carmichael_number.py @@ -10,10 +10,21 @@ satisfies the following modular arithmetic condition: Examples of Carmichael Numbers: 561, 1105, ... https://en.wikipedia.org/wiki/Carmichael_number """ + from maths.greatest_common_divisor import greatest_common_divisor def power(x: int, y: int, mod: int) -> int: + """ + + Examples: + >>> power(2, 15, 3) + 2 + + >>> power(5, 1, 30) + 5 + """ + if y == 0: return 1 temp = power(x, y // 2, mod) % mod @@ -24,15 +35,47 @@ def power(x: int, y: int, mod: int) -> int: def is_carmichael_number(n: int) -> bool: - b = 2 - while b < n: - if greatest_common_divisor(b, n) == 1 and power(b, n - 1, n) != 1: - return False - b += 1 - return True + """ + + Examples: + >>> is_carmichael_number(562) + False + + >>> is_carmichael_number(561) + True + + >>> is_carmichael_number(5.1) + Traceback (most recent call last): + ... + ValueError: Number 5.1 must instead be a positive integer + + >>> is_carmichael_number(-7) + Traceback (most recent call last): + ... + ValueError: Number -7 must instead be a positive integer + + >>> is_carmichael_number(0) + Traceback (most recent call last): + ... 
+ ValueError: Number 0 must instead be a positive integer + """ + + if n <= 0 or not isinstance(n, int): + msg = f"Number {n} must instead be a positive integer" + raise ValueError(msg) + + return all( + power(b, n - 1, n) == 1 + for b in range(2, n) + if greatest_common_divisor(b, n) == 1 + ) if __name__ == "__main__": + import doctest + + doctest.testmod() + number = int(input("Enter number: ").strip()) if is_carmichael_number(number): print(f"{number} is a Carmichael Number.") From 00707392332b90cc9babf7258b1de3e0efa0a580 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Wed, 11 Oct 2023 01:18:31 +0500 Subject: [PATCH 091/306] k_means_clust - change to remove warning (#10244) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * the change removes the warning: /home/runner/work/Python/Python/machine_learning/k_means_clust.py:236: FutureWarning: The provided callable is currently using SeriesGroupBy.sum. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string "sum" instead. .agg( And /home/runner/work/Python/Python/machine_learning/k_means_clust.py:236: FutureWarning: The provided callable is currently using SeriesGroupBy.mean. In a future version of pandas, the provided callable will be used directly. To keep current behavior pass the string "mean" instead. 
.agg( --- machine_learning/k_means_clust.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index d93c5addf..3fe151442 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -235,7 +235,7 @@ def report_generator( ] # group by cluster number .agg( [ - ("sum", np.sum), + ("sum", "sum"), ("mean_with_zeros", lambda x: np.mean(np.nan_to_num(x))), ("mean_without_zeros", lambda x: x.replace(0, np.NaN).mean()), ( @@ -248,7 +248,7 @@ def report_generator( ) ), ), - ("mean_with_na", np.mean), + ("mean_with_na", "mean"), ("min", lambda x: x.min()), ("5%", lambda x: x.quantile(0.05)), ("25%", lambda x: x.quantile(0.25)), From c850227bee5efd9383d1cb8150500eb304c809fc Mon Sep 17 00:00:00 2001 From: cornbread-eater <146371786+cornbread-eater@users.noreply.github.com> Date: Tue, 10 Oct 2023 14:07:07 -0700 Subject: [PATCH 092/306] Add doctests to primelib.py (#10242) * Update primelib.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/primelib.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/maths/primelib.py b/maths/primelib.py index cf01750cf..d5c124255 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -574,6 +574,11 @@ def fib(n): """ input: positive integer 'n' returns the n-th fibonacci term , indexing by 0 + + >>> fib(5) + 8 + >>> fib(99) + 354224848179261915075 """ # precondition @@ -589,3 +594,9 @@ def fib(n): fib1 = tmp return ans + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 672fda913087ab64f9eb7b3a5600cbf83680fb8e Mon Sep 17 00:00:00 2001 From: hollowcrust <72879387+hollowcrust@users.noreply.github.com> Date: Wed, 11 Oct 2023 17:00:49 +0800 Subject: [PATCH 093/306] Fix bug and edit doctests for infix_to_prefix_conversion (#10259) 
* Fix bug and edit doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add type hints, raiseError and other minor adjustments * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleaning code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../stacks/infix_to_prefix_conversion.py | 101 +++++++++++------- 1 file changed, 64 insertions(+), 37 deletions(-) diff --git a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py index 1127211d5..beff421c0 100644 --- a/data_structures/stacks/infix_to_prefix_conversion.py +++ b/data_structures/stacks/infix_to_prefix_conversion.py @@ -15,7 +15,7 @@ Enter an Infix Equation = a + b ^c """ -def infix_2_postfix(infix): +def infix_2_postfix(infix: str) -> str: """ >>> infix_2_postfix("a+b^c") # doctest: +NORMALIZE_WHITESPACE Symbol | Stack | Postfix @@ -28,22 +28,35 @@ def infix_2_postfix(infix): | + | abc^ | | abc^+ 'abc^+' - >>> infix_2_postfix("1*((-a)*2+b)") - Traceback (most recent call last): - ... - KeyError: '(' + + >>> infix_2_postfix("1*((-a)*2+b)") # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ------------------------------------------- + 1 | | 1 + * | * | 1 + ( | *( | 1 + ( | *(( | 1 + - | *((- | 1 + a | *((- | 1a + ) | *( | 1a- + * | *(* | 1a- + 2 | *(* | 1a-2 + + | *(+ | 1a-2* + b | *(+ | 1a-2*b + ) | * | 1a-2*b+ + | | 1a-2*b+* + '1a-2*b+*' + >>> infix_2_postfix("") Symbol | Stack | Postfix ---------------------------- '' - >>> infix_2_postfix("(()") # doctest: +NORMALIZE_WHITESPACE - Symbol | Stack | Postfix - ---------------------------- - ( | ( | - ( | (( | - ) | ( | - | | ( - '(' + + >>> infix_2_postfix("(()") + Traceback (most recent call last): + ... 
+ ValueError: invalid expression + >>> infix_2_postfix("())") Traceback (most recent call last): ... @@ -59,7 +72,7 @@ def infix_2_postfix(infix): "+": 1, "-": 1, } # Priority of each operator - print_width = len(infix) if (len(infix) > 7) else 7 + print_width = max(len(infix), 7) # Print table header for output print( @@ -76,6 +89,9 @@ def infix_2_postfix(infix): elif x == "(": stack.append(x) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered + if len(stack) == 0: # close bracket without open bracket + raise IndexError("list index out of range") + while stack[-1] != "(": post_fix.append(stack.pop()) # Pop stack & add the content to Postfix stack.pop() @@ -83,7 +99,7 @@ def infix_2_postfix(infix): if len(stack) == 0: stack.append(x) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack - while len(stack) > 0 and priority[x] <= priority[stack[-1]]: + while stack and stack[-1] != "(" and priority[x] <= priority[stack[-1]]: post_fix.append(stack.pop()) # pop stack & add to Postfix stack.append(x) # push x to stack @@ -95,6 +111,9 @@ def infix_2_postfix(infix): ) # Output in tabular format while len(stack) > 0: # while stack is not empty + if stack[-1] == "(": # open bracket with no close bracket + raise ValueError("invalid expression") + post_fix.append(stack.pop()) # pop stack & add to Postfix print( " ".center(8), @@ -106,7 +125,7 @@ def infix_2_postfix(infix): return "".join(post_fix) # return Postfix as str -def infix_2_prefix(infix): +def infix_2_prefix(infix: str) -> str: """ >>> infix_2_prefix("a+b^c") # doctest: +NORMALIZE_WHITESPACE Symbol | Stack | Postfix @@ -119,10 +138,23 @@ def infix_2_prefix(infix): | | cb^a+ '+a^bc' - >>> infix_2_prefix("1*((-a)*2+b)") - Traceback (most recent call last): - ... 
- KeyError: '(' + >>> infix_2_prefix("1*((-a)*2+b)") # doctest: +NORMALIZE_WHITESPACE + Symbol | Stack | Postfix + ------------------------------------------- + ( | ( | + b | ( | b + + | (+ | b + 2 | (+ | b2 + * | (+* | b2 + ( | (+*( | b2 + a | (+*( | b2a + - | (+*(- | b2a + ) | (+* | b2a- + ) | | b2a-*+ + * | * | b2a-*+ + 1 | * | b2a-*+1 + | | b2a-*+1* + '*1+*-a2b' >>> infix_2_prefix('') Symbol | Stack | Postfix @@ -134,26 +166,21 @@ def infix_2_prefix(infix): ... IndexError: list index out of range - >>> infix_2_prefix('())') # doctest: +NORMALIZE_WHITESPACE - Symbol | Stack | Postfix - ---------------------------- - ( | ( | - ( | (( | - ) | ( | - | | ( - '(' + >>> infix_2_prefix('())') + Traceback (most recent call last): + ... + ValueError: invalid expression """ - infix = list(infix[::-1]) # reverse the infix equation + reversed_infix = list(infix[::-1]) # reverse the infix equation - for i in range(len(infix)): - if infix[i] == "(": - infix[i] = ")" # change "(" to ")" - elif infix[i] == ")": - infix[i] = "(" # change ")" to "(" + for i in range(len(reversed_infix)): + if reversed_infix[i] == "(": + reversed_infix[i] = ")" # change "(" to ")" + elif reversed_infix[i] == ")": + reversed_infix[i] = "(" # change ")" to "(" - return (infix_2_postfix("".join(infix)))[ - ::-1 - ] # call infix_2_postfix on Infix, return reverse of Postfix + # call infix_2_postfix on Infix, return reverse of Postfix + return (infix_2_postfix("".join(reversed_infix)))[::-1] if __name__ == "__main__": From 5fb6496d1bcd076018e6c829c312f486ed7bb2ee Mon Sep 17 00:00:00 2001 From: Ricardo Martinez Peinado <43684906+rmp2000@users.noreply.github.com> Date: Wed, 11 Oct 2023 12:11:05 +0200 Subject: [PATCH 094/306] Improve primelib.py test coverage #9943 (#10251) * Update the doctest of primelib.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Correct errors for the doctest of primelib.py * last error for the doctest of primelib.py * 
last error for the doctest of primelib.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/primelib.py | 243 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 238 insertions(+), 5 deletions(-) diff --git a/maths/primelib.py b/maths/primelib.py index d5c124255..7e33844be 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -46,6 +46,19 @@ def is_prime(number: int) -> bool: """ input: positive integer 'number' returns true if 'number' is prime otherwise false. + + >>> is_prime(3) + True + >>> is_prime(10) + False + >>> is_prime(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and positive + >>> is_prime("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and positive """ # precondition @@ -83,6 +96,16 @@ def sieve_er(n): This function implements the algorithm called sieve of erathostenes. + >>> sieve_er(8) + [2, 3, 5, 7] + >>> sieve_er(-1) + Traceback (most recent call last): + ... + AssertionError: 'N' must been an int and > 2 + >>> sieve_er("test") + Traceback (most recent call last): + ... + AssertionError: 'N' must been an int and > 2 """ # precondition @@ -116,6 +139,17 @@ def get_prime_numbers(n): input: positive integer 'N' > 2 returns a list of prime numbers from 2 up to N (inclusive) This function is more efficient as function 'sieveEr(...)' + + >>> get_prime_numbers(8) + [2, 3, 5, 7] + >>> get_prime_numbers(-1) + Traceback (most recent call last): + ... + AssertionError: 'N' must been an int and > 2 + >>> get_prime_numbers("test") + Traceback (most recent call last): + ... 
+ AssertionError: 'N' must been an int and > 2 """ # precondition @@ -142,6 +176,21 @@ def prime_factorization(number): """ input: positive integer 'number' returns a list of the prime number factors of 'number' + + >>> prime_factorization(0) + [0] + >>> prime_factorization(8) + [2, 2, 2] + >>> prime_factorization(287) + [7, 41] + >>> prime_factorization(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 0 + >>> prime_factorization("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 0 """ # precondition @@ -183,12 +232,27 @@ def greatest_prime_factor(number): """ input: positive integer 'number' >= 0 returns the greatest prime number factor of 'number' + + >>> greatest_prime_factor(0) + 0 + >>> greatest_prime_factor(8) + 2 + >>> greatest_prime_factor(287) + 41 + >>> greatest_prime_factor(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 0 + >>> greatest_prime_factor("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 0 """ # precondition assert isinstance(number, int) and ( number >= 0 - ), "'number' bust been an int and >= 0" + ), "'number' must been an int and >= 0" ans = 0 @@ -210,12 +274,27 @@ def smallest_prime_factor(number): """ input: integer 'number' >= 0 returns the smallest prime number factor of 'number' + + >>> smallest_prime_factor(0) + 0 + >>> smallest_prime_factor(8) + 2 + >>> smallest_prime_factor(287) + 7 + >>> smallest_prime_factor(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 0 + >>> smallest_prime_factor("test") + Traceback (most recent call last): + ... 
+ AssertionError: 'number' must been an int and >= 0 """ # precondition assert isinstance(number, int) and ( number >= 0 - ), "'number' bust been an int and >= 0" + ), "'number' must been an int and >= 0" ans = 0 @@ -237,11 +316,24 @@ def is_even(number): """ input: integer 'number' returns true if 'number' is even, otherwise false. + + >>> is_even(0) + True + >>> is_even(8) + True + >>> is_even(287) + False + >>> is_even(-1) + False + >>> is_even("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int """ # precondition assert isinstance(number, int), "'number' must been an int" - assert isinstance(number % 2 == 0, bool), "compare bust been from type bool" + assert isinstance(number % 2 == 0, bool), "compare must been from type bool" return number % 2 == 0 @@ -253,11 +345,24 @@ def is_odd(number): """ input: integer 'number' returns true if 'number' is odd, otherwise false. + + >>> is_odd(0) + False + >>> is_odd(8) + False + >>> is_odd(287) + True + >>> is_odd(-1) + True + >>> is_odd("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int """ # precondition assert isinstance(number, int), "'number' must been an int" - assert isinstance(number % 2 != 0, bool), "compare bust been from type bool" + assert isinstance(number % 2 != 0, bool), "compare must been from type bool" return number % 2 != 0 @@ -270,6 +375,23 @@ def goldbach(number): Goldbach's assumption input: a even positive integer 'number' > 2 returns a list of two prime numbers whose sum is equal to 'number' + + >>> goldbach(8) + [3, 5] + >>> goldbach(824) + [3, 821] + >>> goldbach(0) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int, even and > 2 + >>> goldbach(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int, even and > 2 + >>> goldbach("test") + Traceback (most recent call last): + ... 
+ AssertionError: 'number' must been an int, even and > 2 """ # precondition @@ -323,6 +445,23 @@ def kg_v(number1, number2): Least common multiple input: two positive integer 'number1' and 'number2' returns the least common multiple of 'number1' and 'number2' + + >>> kg_v(8,10) + 40 + >>> kg_v(824,67) + 55208 + >>> kg_v(0) + Traceback (most recent call last): + ... + TypeError: kg_v() missing 1 required positional argument: 'number2' + >>> kg_v(10,-1) + Traceback (most recent call last): + ... + AssertionError: 'number1' and 'number2' must been positive integer. + >>> kg_v("test","test2") + Traceback (most recent call last): + ... + AssertionError: 'number1' and 'number2' must been positive integer. """ # precondition @@ -395,6 +534,21 @@ def get_prime(n): Gets the n-th prime number. input: positive integer 'n' >= 0 returns the n-th prime number, beginning at index 0 + + >>> get_prime(0) + 2 + >>> get_prime(8) + 23 + >>> get_prime(824) + 6337 + >>> get_prime(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been a positive int + >>> get_prime("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been a positive int """ # precondition @@ -430,6 +584,25 @@ def get_primes_between(p_number_1, p_number_2): pNumber1 < pNumber2 returns a list of all prime numbers between 'pNumber1' (exclusive) and 'pNumber2' (exclusive) + + >>> get_primes_between(3, 67) + [5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61] + >>> get_primes_between(0) + Traceback (most recent call last): + ... + TypeError: get_primes_between() missing 1 required positional argument: 'p_number_2' + >>> get_primes_between(0, 1) + Traceback (most recent call last): + ... + AssertionError: The arguments must been prime numbers and 'pNumber1' < 'pNumber2' + >>> get_primes_between(-1, 3) + Traceback (most recent call last): + ... 
+ AssertionError: 'number' must been an int and positive + >>> get_primes_between("test","test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and positive """ # precondition @@ -473,6 +646,19 @@ def get_divisors(n): """ input: positive integer 'n' >= 1 returns all divisors of n (inclusive 1 and 'n') + + >>> get_divisors(8) + [1, 2, 4, 8] + >>> get_divisors(824) + [1, 2, 4, 8, 103, 206, 412, 824] + >>> get_divisors(-1) + Traceback (most recent call last): + ... + AssertionError: 'n' must been int and >= 1 + >>> get_divisors("test") + Traceback (most recent call last): + ... + AssertionError: 'n' must been int and >= 1 """ # precondition @@ -497,6 +683,19 @@ def is_perfect_number(number): """ input: positive integer 'number' > 1 returns true if 'number' is a perfect number otherwise false. + + >>> is_perfect_number(28) + True + >>> is_perfect_number(824) + False + >>> is_perfect_number(-1) + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 1 + >>> is_perfect_number("test") + Traceback (most recent call last): + ... + AssertionError: 'number' must been an int and >= 1 """ # precondition @@ -525,6 +724,15 @@ def simplify_fraction(numerator, denominator): input: two integer 'numerator' and 'denominator' assumes: 'denominator' != 0 returns: a tuple with simplify numerator and denominator. + + >>> simplify_fraction(10, 20) + (1, 2) + >>> simplify_fraction(10, -1) + (10, -1) + >>> simplify_fraction("test","test") + Traceback (most recent call last): + ... + AssertionError: The arguments must been from type int and 'denominator' != 0 """ # precondition @@ -554,6 +762,19 @@ def factorial(n): """ input: positive integer 'n' returns the factorial of 'n' (n!) + + >>> factorial(0) + 1 + >>> factorial(20) + 2432902008176640000 + >>> factorial(-1) + Traceback (most recent call last): + ... 
+ AssertionError: 'n' must been a int and >= 0 + >>> factorial("test") + Traceback (most recent call last): + ... + AssertionError: 'n' must been a int and >= 0 """ # precondition @@ -570,15 +791,27 @@ def factorial(n): # ------------------------------------------------------------------- -def fib(n): +def fib(n: int) -> int: """ input: positive integer 'n' returns the n-th fibonacci term , indexing by 0 + >>> fib(0) + 1 >>> fib(5) 8 + >>> fib(20) + 10946 >>> fib(99) 354224848179261915075 + >>> fib(-1) + Traceback (most recent call last): + ... + AssertionError: 'n' must been an int and >= 0 + >>> fib("test") + Traceback (most recent call last): + ... + AssertionError: 'n' must been an int and >= 0 """ # precondition From d5323dbaee21a9ae209efa17852b02c3101a0220 Mon Sep 17 00:00:00 2001 From: Aasheesh <126905285+AasheeshLikePanner@users.noreply.github.com> Date: Wed, 11 Oct 2023 23:50:18 +0530 Subject: [PATCH 095/306] Adding doctests in simpson_rule.py (#10269) * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/simpson_rule.py Co-authored-by: Christian Clauss * Update maths/simpson_rule.py Co-authored-by: Christian Clauss * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update simpson_rule.py * Adding doctests in 
simpson_rule.py * Adding doctests in simpson_rule.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/simpson_rule.py | 43 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) diff --git a/maths/simpson_rule.py b/maths/simpson_rule.py index d66dc39a7..e75fb557a 100644 --- a/maths/simpson_rule.py +++ b/maths/simpson_rule.py @@ -1,7 +1,7 @@ """ Numerical integration or quadrature for a smooth function f with known values at x_i -This method is the classical approach of suming 'Equally Spaced Abscissas' +This method is the classical approach of summing 'Equally Spaced Abscissas' method 2: "Simpson Rule" @@ -9,9 +9,41 @@ method 2: """ -def method_2(boundary, steps): +def method_2(boundary: list[int], steps: int) -> float: # "Simpson Rule" # int(f) = delta_x/2 * (b-a)/3*(f1 + 4f2 + 2f_3 + ... + fn) + """ + Calculate the definite integral of a function using Simpson's Rule. + :param boundary: A list containing the lower and upper bounds of integration. + :param steps: The number of steps or resolution for the integration. + :return: The approximate integral value. + + >>> round(method_2([0, 2, 4], 10), 10) + 2.6666666667 + >>> round(method_2([2, 0], 10), 10) + -0.2666666667 + >>> round(method_2([-2, -1], 10), 10) + 2.172 + >>> round(method_2([0, 1], 10), 10) + 0.3333333333 + >>> round(method_2([0, 2], 10), 10) + 2.6666666667 + >>> round(method_2([0, 2], 100), 10) + 2.5621226667 + >>> round(method_2([0, 1], 1000), 10) + 0.3320026653 + >>> round(method_2([0, 2], 0), 10) + Traceback (most recent call last): + ... + ZeroDivisionError: Number of steps must be greater than zero + >>> round(method_2([0, 2], -10), 10) + Traceback (most recent call last): + ... 
+ ZeroDivisionError: Number of steps must be greater than zero + """ + if steps <= 0: + raise ZeroDivisionError("Number of steps must be greater than zero") + h = (boundary[1] - boundary[0]) / steps a = boundary[0] b = boundary[1] @@ -41,11 +73,14 @@ def f(x): # enter your function here def main(): a = 0.0 # Lower bound of integration b = 1.0 # Upper bound of integration - steps = 10.0 # define number of steps or resolution - boundary = [a, b] # define boundary of integration + steps = 10.0 # number of steps or resolution + boundary = [a, b] # boundary of integration y = method_2(boundary, steps) print(f"y = {y}") if __name__ == "__main__": + import doctest + + doctest.testmod() main() From 3f094fe49d14e64d2c8f0e2c14d339ab6d0ee735 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 11 Oct 2023 20:30:02 +0200 Subject: [PATCH 096/306] Ruff pandas vet (#10281) * Python linting: Add ruff rules for Pandas-vet and Pytest-style * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 1 + blockchain/diophantine_equation.py | 6 +- ciphers/xor_cipher.py | 18 +++-- conversions/decimal_to_hexadecimal.py | 3 +- .../binary_search_tree_recursive.py | 28 ++++---- .../hashing/tests/test_hash_map.py | 4 +- .../linked_list/circular_linked_list.py | 6 +- .../test_digital_image_processing.py | 3 +- graphs/graph_adjacency_list.py | 60 ++++++++-------- graphs/graph_adjacency_matrix.py | 60 ++++++++-------- hashes/sha256.py | 2 +- knapsack/tests/test_greedy_knapsack.py | 16 ++--- knapsack/tests/test_knapsack.py | 8 +-- linear_algebra/src/lib.py | 11 ++- linear_algebra/src/schur_complement.py | 7 +- linear_algebra/src/test_linear_algebra.py | 69 ++++++++++--------- machine_learning/dimensionality_reduction.py | 4 +- machine_learning/k_means_clust.py | 69 +++++++++---------- maths/least_common_multiple.py | 4 +- maths/modular_division.py | 10 ++- maths/prime_check.py | 48 
++++++------- matrix/sherman_morrison.py | 6 +- matrix/tests/test_matrix_operation.py | 28 ++++---- project_euler/problem_054/test_poker_hand.py | 14 ++-- pyproject.toml | 8 ++- strings/knuth_morris_pratt.py | 3 +- strings/rabin_karp.py | 3 +- 28 files changed, 260 insertions(+), 241 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7340a0fd0..84f4a7770 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.5.1 + rev: v1.6.0 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 015efb3c7..2c6000c94 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -532,6 +532,7 @@ * [Logistic Regression](machine_learning/logistic_regression.py) * Loss Functions * [Binary Cross Entropy](machine_learning/loss_functions/binary_cross_entropy.py) + * [Categorical Cross Entropy](machine_learning/loss_functions/categorical_cross_entropy.py) * [Huber Loss](machine_learning/loss_functions/huber_loss.py) * [Mean Squared Error](machine_learning/loss_functions/mean_squared_error.py) * [Mfcc](machine_learning/mfcc.py) diff --git a/blockchain/diophantine_equation.py b/blockchain/diophantine_equation.py index 7110d9023..ae6a145d2 100644 --- a/blockchain/diophantine_equation.py +++ b/blockchain/diophantine_equation.py @@ -83,7 +83,8 @@ def extended_gcd(a: int, b: int) -> tuple[int, int, int]: (1, -2, 3) """ - assert a >= 0 and b >= 0 + assert a >= 0 + assert b >= 0 if b == 0: d, x, y = a, 1, 0 @@ -92,7 +93,8 @@ def extended_gcd(a: int, b: int) -> tuple[int, int, int]: x = q y = p - q * (a // b) - assert a % d == 0 and b % d == 0 + assert a % d == 0 + assert b % d == 0 assert d == a * x + b * y return (d, x, y) diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index 0f369e38f..559036d30 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -38,7 +38,8 @@ class XORCipher: """ # precondition - assert 
isinstance(key, int) and isinstance(content, str) + assert isinstance(key, int) + assert isinstance(content, str) key = key or self.__key or 1 @@ -56,7 +57,8 @@ class XORCipher: """ # precondition - assert isinstance(key, int) and isinstance(content, list) + assert isinstance(key, int) + assert isinstance(content, list) key = key or self.__key or 1 @@ -74,7 +76,8 @@ class XORCipher: """ # precondition - assert isinstance(key, int) and isinstance(content, str) + assert isinstance(key, int) + assert isinstance(content, str) key = key or self.__key or 1 @@ -99,7 +102,8 @@ class XORCipher: """ # precondition - assert isinstance(key, int) and isinstance(content, str) + assert isinstance(key, int) + assert isinstance(content, str) key = key or self.__key or 1 @@ -125,7 +129,8 @@ class XORCipher: """ # precondition - assert isinstance(file, str) and isinstance(key, int) + assert isinstance(file, str) + assert isinstance(key, int) try: with open(file) as fin, open("encrypt.out", "w+") as fout: @@ -148,7 +153,8 @@ class XORCipher: """ # precondition - assert isinstance(file, str) and isinstance(key, int) + assert isinstance(file, str) + assert isinstance(key, int) try: with open(file) as fin, open("decrypt.out", "w+") as fout: diff --git a/conversions/decimal_to_hexadecimal.py b/conversions/decimal_to_hexadecimal.py index 5ea48401f..b1fb4f082 100644 --- a/conversions/decimal_to_hexadecimal.py +++ b/conversions/decimal_to_hexadecimal.py @@ -57,7 +57,8 @@ def decimal_to_hexadecimal(decimal: float) -> str: >>> decimal_to_hexadecimal(-256) == hex(-256) True """ - assert type(decimal) in (int, float) and decimal == int(decimal) + assert isinstance(decimal, (int, float)) + assert decimal == int(decimal) decimal = int(decimal) hexadecimal = "" negative = False diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index b5b983b9b..13b9b3921 100644 --- 
a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -12,6 +12,8 @@ from __future__ import annotations import unittest from collections.abc import Iterator +import pytest + class Node: def __init__(self, label: int, parent: Node | None) -> None: @@ -78,7 +80,7 @@ class BinarySearchTree: node.right = self._put(node.right, label, node) else: msg = f"Node with label {label} already exists" - raise Exception(msg) + raise ValueError(msg) return node @@ -95,14 +97,14 @@ class BinarySearchTree: >>> node = t.search(3) Traceback (most recent call last): ... - Exception: Node with label 3 does not exist + ValueError: Node with label 3 does not exist """ return self._search(self.root, label) def _search(self, node: Node | None, label: int) -> Node: if node is None: msg = f"Node with label {label} does not exist" - raise Exception(msg) + raise ValueError(msg) else: if label < node.label: node = self._search(node.left, label) @@ -124,7 +126,7 @@ class BinarySearchTree: >>> t.remove(3) Traceback (most recent call last): ... - Exception: Node with label 3 does not exist + ValueError: Node with label 3 does not exist """ node = self.search(label) if node.right and node.left: @@ -179,7 +181,7 @@ class BinarySearchTree: try: self.search(label) return True - except Exception: + except ValueError: return False def get_max_label(self) -> int: @@ -190,7 +192,7 @@ class BinarySearchTree: >>> t.get_max_label() Traceback (most recent call last): ... - Exception: Binary search tree is empty + ValueError: Binary search tree is empty >>> t.put(8) >>> t.put(10) @@ -198,7 +200,7 @@ class BinarySearchTree: 10 """ if self.root is None: - raise Exception("Binary search tree is empty") + raise ValueError("Binary search tree is empty") node = self.root while node.right is not None: @@ -214,7 +216,7 @@ class BinarySearchTree: >>> t.get_min_label() Traceback (most recent call last): ... 
- Exception: Binary search tree is empty + ValueError: Binary search tree is empty >>> t.put(8) >>> t.put(10) @@ -222,7 +224,7 @@ class BinarySearchTree: 8 """ if self.root is None: - raise Exception("Binary search tree is empty") + raise ValueError("Binary search tree is empty") node = self.root while node.left is not None: @@ -359,7 +361,7 @@ class BinarySearchTreeTest(unittest.TestCase): assert t.root.left.left.parent == t.root.left assert t.root.left.left.label == 1 - with self.assertRaises(Exception): # noqa: B017 + with pytest.raises(ValueError): t.put(1) def test_search(self) -> None: @@ -371,7 +373,7 @@ class BinarySearchTreeTest(unittest.TestCase): node = t.search(13) assert node.label == 13 - with self.assertRaises(Exception): # noqa: B017 + with pytest.raises(ValueError): t.search(2) def test_remove(self) -> None: @@ -517,7 +519,7 @@ class BinarySearchTreeTest(unittest.TestCase): assert t.get_max_label() == 14 t.empty() - with self.assertRaises(Exception): # noqa: B017 + with pytest.raises(ValueError): t.get_max_label() def test_get_min_label(self) -> None: @@ -526,7 +528,7 @@ class BinarySearchTreeTest(unittest.TestCase): assert t.get_min_label() == 1 t.empty() - with self.assertRaises(Exception): # noqa: B017 + with pytest.raises(ValueError): t.get_min_label() def test_inorder_traversal(self) -> None: diff --git a/data_structures/hashing/tests/test_hash_map.py b/data_structures/hashing/tests/test_hash_map.py index 929e67311..4292c0178 100644 --- a/data_structures/hashing/tests/test_hash_map.py +++ b/data_structures/hashing/tests/test_hash_map.py @@ -65,14 +65,14 @@ _add_with_resize_down = [ @pytest.mark.parametrize( "operations", - ( + [ pytest.param(_add_items, id="add items"), pytest.param(_overwrite_items, id="overwrite items"), pytest.param(_delete_items, id="delete items"), pytest.param(_access_absent_items, id="access absent items"), pytest.param(_add_with_resize_up, id="add with resize up"), pytest.param(_add_with_resize_down, id="add with 
resize down"), - ), + ], ) def test_hash_map_is_the_same_as_dict(operations): my = HashMap(initial_block_size=4) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index ef6658733..54343c80a 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -124,7 +124,8 @@ class CircularLinkedList: if not 0 <= index < len(self): raise IndexError("list index out of range.") - assert self.head is not None and self.tail is not None + assert self.head is not None + assert self.tail is not None delete_node: Node = self.head if self.head == self.tail: # Just one node self.head = self.tail = None @@ -137,7 +138,8 @@ class CircularLinkedList: for _ in range(index - 1): assert temp is not None temp = temp.next - assert temp is not None and temp.next is not None + assert temp is not None + assert temp.next is not None delete_node = temp.next temp.next = temp.next.next if index == len(self) - 1: # Delete at tail diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index 2e5630458..7993110d6 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -73,7 +73,8 @@ def test_median_filter(): def test_sobel_filter(): grad, theta = sob.sobel_filter(gray) - assert grad.any() and theta.any() + assert grad.any() + assert theta.any() def test_sepia(): diff --git a/graphs/graph_adjacency_list.py b/graphs/graph_adjacency_list.py index 76f34f845..d0b94f03e 100644 --- a/graphs/graph_adjacency_list.py +++ b/graphs/graph_adjacency_list.py @@ -22,6 +22,8 @@ import unittest from pprint import pformat from typing import Generic, TypeVar +import pytest + T = TypeVar("T") @@ -185,9 +187,9 @@ class TestGraphAdjacencyList(unittest.TestCase): directed_graph: GraphAdjacencyList, edge: list[int], ) -> None: - 
self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1])) - self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0])) - self.assertTrue(directed_graph.contains_edge(edge[0], edge[1])) + assert undirected_graph.contains_edge(edge[0], edge[1]) + assert undirected_graph.contains_edge(edge[1], edge[0]) + assert directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_edge_does_not_exist_check( self, @@ -195,9 +197,9 @@ class TestGraphAdjacencyList(unittest.TestCase): directed_graph: GraphAdjacencyList, edge: list[int], ) -> None: - self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1])) - self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0])) - self.assertFalse(directed_graph.contains_edge(edge[0], edge[1])) + assert not undirected_graph.contains_edge(edge[0], edge[1]) + assert not undirected_graph.contains_edge(edge[1], edge[0]) + assert not directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_vertex_exists_check( self, @@ -205,8 +207,8 @@ class TestGraphAdjacencyList(unittest.TestCase): directed_graph: GraphAdjacencyList, vertex: int, ) -> None: - self.assertTrue(undirected_graph.contains_vertex(vertex)) - self.assertTrue(directed_graph.contains_vertex(vertex)) + assert undirected_graph.contains_vertex(vertex) + assert directed_graph.contains_vertex(vertex) def __assert_graph_vertex_does_not_exist_check( self, @@ -214,13 +216,13 @@ class TestGraphAdjacencyList(unittest.TestCase): directed_graph: GraphAdjacencyList, vertex: int, ) -> None: - self.assertFalse(undirected_graph.contains_vertex(vertex)) - self.assertFalse(directed_graph.contains_vertex(vertex)) + assert not undirected_graph.contains_vertex(vertex) + assert not directed_graph.contains_vertex(vertex) def __generate_random_edges( self, vertices: list[int], edge_pick_count: int ) -> list[list[int]]: - self.assertTrue(edge_pick_count <= len(vertices)) + assert edge_pick_count <= len(vertices) random_source_vertices: list[int] = random.sample( 
vertices[0 : int(len(vertices) / 2)], edge_pick_count @@ -281,8 +283,8 @@ class TestGraphAdjacencyList(unittest.TestCase): self.__assert_graph_edge_exists_check( undirected_graph, directed_graph, edge ) - self.assertFalse(undirected_graph.directed) - self.assertTrue(directed_graph.directed) + assert not undirected_graph.directed + assert directed_graph.directed def test_contains_vertex(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) @@ -297,12 +299,8 @@ class TestGraphAdjacencyList(unittest.TestCase): # Test contains_vertex for num in range(101): - self.assertEqual( - num in random_vertices, undirected_graph.contains_vertex(num) - ) - self.assertEqual( - num in random_vertices, directed_graph.contains_vertex(num) - ) + assert (num in random_vertices) == undirected_graph.contains_vertex(num) + assert (num in random_vertices) == directed_graph.contains_vertex(num) def test_add_vertices(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) @@ -507,9 +505,9 @@ class TestGraphAdjacencyList(unittest.TestCase): ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.add_vertex(vertex) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.add_vertex(vertex) def test_remove_vertex_exception_check(self) -> None: @@ -522,9 +520,9 @@ class TestGraphAdjacencyList(unittest.TestCase): for i in range(101): if i not in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.remove_vertex(i) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.remove_vertex(i) def test_add_edge_exception_check(self) -> None: @@ -536,9 +534,9 @@ class TestGraphAdjacencyList(unittest.TestCase): ) = self.__generate_graphs(20, 0, 100, 4) for edge in random_edges: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): 
undirected_graph.add_edge(edge[0], edge[1]) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.add_edge(edge[0], edge[1]) def test_remove_edge_exception_check(self) -> None: @@ -560,9 +558,9 @@ class TestGraphAdjacencyList(unittest.TestCase): more_random_edges.append(edge) for edge in more_random_edges: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.remove_edge(edge[0], edge[1]) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.remove_edge(edge[0], edge[1]) def test_contains_edge_exception_check(self) -> None: @@ -574,14 +572,14 @@ class TestGraphAdjacencyList(unittest.TestCase): ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.contains_edge(vertex, 102) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.contains_edge(vertex, 102) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.contains_edge(103, 102) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.contains_edge(103, 102) diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py index 4d2e02f73..cdef388d9 100644 --- a/graphs/graph_adjacency_matrix.py +++ b/graphs/graph_adjacency_matrix.py @@ -22,6 +22,8 @@ import unittest from pprint import pformat from typing import Generic, TypeVar +import pytest + T = TypeVar("T") @@ -203,9 +205,9 @@ class TestGraphMatrix(unittest.TestCase): directed_graph: GraphAdjacencyMatrix, edge: list[int], ) -> None: - self.assertTrue(undirected_graph.contains_edge(edge[0], edge[1])) - self.assertTrue(undirected_graph.contains_edge(edge[1], edge[0])) - self.assertTrue(directed_graph.contains_edge(edge[0], edge[1])) + assert undirected_graph.contains_edge(edge[0], edge[1]) + assert undirected_graph.contains_edge(edge[1], edge[0]) 
+ assert directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_edge_does_not_exist_check( self, @@ -213,9 +215,9 @@ class TestGraphMatrix(unittest.TestCase): directed_graph: GraphAdjacencyMatrix, edge: list[int], ) -> None: - self.assertFalse(undirected_graph.contains_edge(edge[0], edge[1])) - self.assertFalse(undirected_graph.contains_edge(edge[1], edge[0])) - self.assertFalse(directed_graph.contains_edge(edge[0], edge[1])) + assert not undirected_graph.contains_edge(edge[0], edge[1]) + assert not undirected_graph.contains_edge(edge[1], edge[0]) + assert not directed_graph.contains_edge(edge[0], edge[1]) def __assert_graph_vertex_exists_check( self, @@ -223,8 +225,8 @@ class TestGraphMatrix(unittest.TestCase): directed_graph: GraphAdjacencyMatrix, vertex: int, ) -> None: - self.assertTrue(undirected_graph.contains_vertex(vertex)) - self.assertTrue(directed_graph.contains_vertex(vertex)) + assert undirected_graph.contains_vertex(vertex) + assert directed_graph.contains_vertex(vertex) def __assert_graph_vertex_does_not_exist_check( self, @@ -232,13 +234,13 @@ class TestGraphMatrix(unittest.TestCase): directed_graph: GraphAdjacencyMatrix, vertex: int, ) -> None: - self.assertFalse(undirected_graph.contains_vertex(vertex)) - self.assertFalse(directed_graph.contains_vertex(vertex)) + assert not undirected_graph.contains_vertex(vertex) + assert not directed_graph.contains_vertex(vertex) def __generate_random_edges( self, vertices: list[int], edge_pick_count: int ) -> list[list[int]]: - self.assertTrue(edge_pick_count <= len(vertices)) + assert edge_pick_count <= len(vertices) random_source_vertices: list[int] = random.sample( vertices[0 : int(len(vertices) / 2)], edge_pick_count @@ -300,8 +302,8 @@ class TestGraphMatrix(unittest.TestCase): undirected_graph, directed_graph, edge ) - self.assertFalse(undirected_graph.directed) - self.assertTrue(directed_graph.directed) + assert not undirected_graph.directed + assert directed_graph.directed def 
test_contains_vertex(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) @@ -316,12 +318,8 @@ class TestGraphMatrix(unittest.TestCase): # Test contains_vertex for num in range(101): - self.assertEqual( - num in random_vertices, undirected_graph.contains_vertex(num) - ) - self.assertEqual( - num in random_vertices, directed_graph.contains_vertex(num) - ) + assert (num in random_vertices) == undirected_graph.contains_vertex(num) + assert (num in random_vertices) == directed_graph.contains_vertex(num) def test_add_vertices(self) -> None: random_vertices: list[int] = random.sample(range(101), 20) @@ -526,9 +524,9 @@ class TestGraphMatrix(unittest.TestCase): ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.add_vertex(vertex) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.add_vertex(vertex) def test_remove_vertex_exception_check(self) -> None: @@ -541,9 +539,9 @@ class TestGraphMatrix(unittest.TestCase): for i in range(101): if i not in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.remove_vertex(i) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.remove_vertex(i) def test_add_edge_exception_check(self) -> None: @@ -555,9 +553,9 @@ class TestGraphMatrix(unittest.TestCase): ) = self.__generate_graphs(20, 0, 100, 4) for edge in random_edges: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.add_edge(edge[0], edge[1]) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.add_edge(edge[0], edge[1]) def test_remove_edge_exception_check(self) -> None: @@ -579,9 +577,9 @@ class TestGraphMatrix(unittest.TestCase): more_random_edges.append(edge) for edge in more_random_edges: - with self.assertRaises(ValueError): + with 
pytest.raises(ValueError): undirected_graph.remove_edge(edge[0], edge[1]) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.remove_edge(edge[0], edge[1]) def test_contains_edge_exception_check(self) -> None: @@ -593,14 +591,14 @@ class TestGraphMatrix(unittest.TestCase): ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.contains_edge(vertex, 102) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.contains_edge(vertex, 102) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): undirected_graph.contains_edge(103, 102) - with self.assertRaises(ValueError): + with pytest.raises(ValueError): directed_graph.contains_edge(103, 102) diff --git a/hashes/sha256.py b/hashes/sha256.py index ba9aff8db..bcc83edca 100644 --- a/hashes/sha256.py +++ b/hashes/sha256.py @@ -203,7 +203,7 @@ class SHA256HashTest(unittest.TestCase): import hashlib msg = bytes("Test String", "utf-8") - self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest()) + assert SHA256(msg).hash == hashlib.sha256(msg).hexdigest() def main() -> None: diff --git a/knapsack/tests/test_greedy_knapsack.py b/knapsack/tests/test_greedy_knapsack.py index b7b62d5d8..e6a400841 100644 --- a/knapsack/tests/test_greedy_knapsack.py +++ b/knapsack/tests/test_greedy_knapsack.py @@ -1,5 +1,7 @@ import unittest +import pytest + from knapsack import greedy_knapsack as kp @@ -16,7 +18,7 @@ class TestClass(unittest.TestCase): profit = [10, 20, 30, 40, 50, 60] weight = [2, 4, 6, 8, 10, 12] max_weight = 100 - self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210) + assert kp.calc_profit(profit, weight, max_weight) == 210 def test_negative_max_weight(self): """ @@ -26,7 +28,7 @@ class TestClass(unittest.TestCase): # profit = [10, 20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = -15 - 
self.assertRaisesRegex(ValueError, "max_weight must greater than zero.") + pytest.raises(ValueError, match="max_weight must greater than zero.") def test_negative_profit_value(self): """ @@ -36,7 +38,7 @@ class TestClass(unittest.TestCase): # profit = [10, -20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = 15 - self.assertRaisesRegex(ValueError, "Weight can not be negative.") + pytest.raises(ValueError, match="Weight can not be negative.") def test_negative_weight_value(self): """ @@ -46,7 +48,7 @@ class TestClass(unittest.TestCase): # profit = [10, 20, 30, 40, 50, 60] # weight = [2, -4, 6, -8, 10, 12] # max_weight = 15 - self.assertRaisesRegex(ValueError, "Profit can not be negative.") + pytest.raises(ValueError, match="Profit can not be negative.") def test_null_max_weight(self): """ @@ -56,7 +58,7 @@ class TestClass(unittest.TestCase): # profit = [10, 20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = null - self.assertRaisesRegex(ValueError, "max_weight must greater than zero.") + pytest.raises(ValueError, match="max_weight must greater than zero.") def test_unequal_list_length(self): """ @@ -66,9 +68,7 @@ class TestClass(unittest.TestCase): # profit = [10, 20, 30, 40, 50] # weight = [2, 4, 6, 8, 10, 12] # max_weight = 100 - self.assertRaisesRegex( - IndexError, "The length of profit and weight must be same." 
- ) + pytest.raises(IndexError, match="The length of profit and weight must be same.") if __name__ == "__main__": diff --git a/knapsack/tests/test_knapsack.py b/knapsack/tests/test_knapsack.py index 248855fbc..6932bbb35 100644 --- a/knapsack/tests/test_knapsack.py +++ b/knapsack/tests/test_knapsack.py @@ -20,12 +20,12 @@ class Test(unittest.TestCase): val = [0] w = [0] c = len(val) - self.assertEqual(k.knapsack(cap, w, val, c), 0) + assert k.knapsack(cap, w, val, c) == 0 val = [60] w = [10] c = len(val) - self.assertEqual(k.knapsack(cap, w, val, c), 0) + assert k.knapsack(cap, w, val, c) == 0 def test_easy_case(self): """ @@ -35,7 +35,7 @@ class Test(unittest.TestCase): val = [1, 2, 3] w = [3, 2, 1] c = len(val) - self.assertEqual(k.knapsack(cap, w, val, c), 5) + assert k.knapsack(cap, w, val, c) == 5 def test_knapsack(self): """ @@ -45,7 +45,7 @@ class Test(unittest.TestCase): val = [60, 100, 120] w = [10, 20, 30] c = len(val) - self.assertEqual(k.knapsack(cap, w, val, c), 220) + assert k.knapsack(cap, w, val, c) == 220 if __name__ == "__main__": diff --git a/linear_algebra/src/lib.py b/linear_algebra/src/lib.py index e3556e74c..5074faf31 100644 --- a/linear_algebra/src/lib.py +++ b/linear_algebra/src/lib.py @@ -200,7 +200,8 @@ def unit_basis_vector(dimension: int, pos: int) -> Vector: at index 'pos' (indexing at 0) """ # precondition - assert isinstance(dimension, int) and (isinstance(pos, int)) + assert isinstance(dimension, int) + assert isinstance(pos, int) ans = [0] * dimension ans[pos] = 1 return Vector(ans) @@ -213,11 +214,9 @@ def axpy(scalar: float, x: Vector, y: Vector) -> Vector: computes the axpy operation """ # precondition - assert ( - isinstance(x, Vector) - and isinstance(y, Vector) - and (isinstance(scalar, (int, float))) - ) + assert isinstance(x, Vector) + assert isinstance(y, Vector) + assert isinstance(scalar, (int, float)) return x * scalar + y diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index 
750f4de5e..1cc084043 100644
--- a/linear_algebra/src/schur_complement.py
+++ b/linear_algebra/src/schur_complement.py
@@ -1,6 +1,7 @@
 import unittest
 
 import numpy as np
+import pytest
 
 
 def schur_complement(
@@ -70,14 +71,14 @@ class TestSchurComplement(unittest.TestCase):
         det_a = np.linalg.det(a)
         det_s = np.linalg.det(s)
 
-        self.assertAlmostEqual(det_x, det_a * det_s)
+        assert np.isclose(det_x, det_a * det_s)
 
     def test_improper_a_b_dimensions(self) -> None:
         a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
         b = np.array([[0, 3], [3, 0], [2, 3]])
         c = np.array([[2, 1], [6, 3]])
 
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             schur_complement(a, b, c)
 
     def test_improper_b_c_dimensions(self) -> None:
@@ -85,7 +86,7 @@ class TestSchurComplement(unittest.TestCase):
         b = np.array([[0, 3], [3, 0], [2, 3]])
         c = np.array([[2, 1, 3], [6, 3, 5]])
 
-        with self.assertRaises(ValueError):
+        with pytest.raises(ValueError):
             schur_complement(a, b, c)
 
 diff --git a/linear_algebra/src/test_linear_algebra.py b/linear_algebra/src/test_linear_algebra.py
index 50d079572..95ab408b3 100644
--- a/linear_algebra/src/test_linear_algebra.py
+++ b/linear_algebra/src/test_linear_algebra.py
@@ -8,6 +8,8 @@ This file contains the test-suite for the linear algebra library.
""" import unittest +import pytest + from .lib import ( Matrix, Vector, @@ -24,8 +26,8 @@ class Test(unittest.TestCase): test for method component() """ x = Vector([1, 2, 3]) - self.assertEqual(x.component(0), 1) - self.assertEqual(x.component(2), 3) + assert x.component(0) == 1 + assert x.component(2) == 3 _ = Vector() def test_str(self) -> None: @@ -33,14 +35,14 @@ class Test(unittest.TestCase): test for method toString() """ x = Vector([0, 0, 0, 0, 0, 1]) - self.assertEqual(str(x), "(0,0,0,0,0,1)") + assert str(x) == "(0,0,0,0,0,1)" def test_size(self) -> None: """ test for method size() """ x = Vector([1, 2, 3, 4]) - self.assertEqual(len(x), 4) + assert len(x) == 4 def test_euclidean_length(self) -> None: """ @@ -50,10 +52,10 @@ class Test(unittest.TestCase): y = Vector([1, 2, 3, 4, 5]) z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) w = Vector([1, -1, 1, -1, 2, -3, 4, -5]) - self.assertAlmostEqual(x.euclidean_length(), 2.236, 3) - self.assertAlmostEqual(y.euclidean_length(), 7.416, 3) - self.assertEqual(z.euclidean_length(), 0) - self.assertAlmostEqual(w.euclidean_length(), 7.616, 3) + assert x.euclidean_length() == pytest.approx(2.236, abs=1e-3) + assert y.euclidean_length() == pytest.approx(7.416, abs=1e-3) + assert z.euclidean_length() == 0 + assert w.euclidean_length() == pytest.approx(7.616, abs=1e-3) def test_add(self) -> None: """ @@ -61,9 +63,9 @@ class Test(unittest.TestCase): """ x = Vector([1, 2, 3]) y = Vector([1, 1, 1]) - self.assertEqual((x + y).component(0), 2) - self.assertEqual((x + y).component(1), 3) - self.assertEqual((x + y).component(2), 4) + assert (x + y).component(0) == 2 + assert (x + y).component(1) == 3 + assert (x + y).component(2) == 4 def test_sub(self) -> None: """ @@ -71,9 +73,9 @@ class Test(unittest.TestCase): """ x = Vector([1, 2, 3]) y = Vector([1, 1, 1]) - self.assertEqual((x - y).component(0), 0) - self.assertEqual((x - y).component(1), 1) - self.assertEqual((x - y).component(2), 2) + assert (x - y).component(0) == 0 + assert 
(x - y).component(1) == 1 + assert (x - y).component(2) == 2 def test_mul(self) -> None: """ @@ -82,20 +84,20 @@ class Test(unittest.TestCase): x = Vector([1, 2, 3]) a = Vector([2, -1, 4]) # for test of dot product b = Vector([1, -2, -1]) - self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)") - self.assertEqual((a * b), 0) + assert str(x * 3.0) == "(3.0,6.0,9.0)" + assert a * b == 0 def test_zero_vector(self) -> None: """ test for global function zero_vector() """ - self.assertEqual(str(zero_vector(10)).count("0"), 10) + assert str(zero_vector(10)).count("0") == 10 def test_unit_basis_vector(self) -> None: """ test for global function unit_basis_vector() """ - self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)") + assert str(unit_basis_vector(3, 1)) == "(0,1,0)" def test_axpy(self) -> None: """ @@ -103,7 +105,7 @@ class Test(unittest.TestCase): """ x = Vector([1, 2, 3]) y = Vector([1, 0, 1]) - self.assertEqual(str(axpy(2, x, y)), "(3,4,7)") + assert str(axpy(2, x, y)) == "(3,4,7)" def test_copy(self) -> None: """ @@ -111,7 +113,7 @@ class Test(unittest.TestCase): """ x = Vector([1, 0, 0, 0, 0, 0]) y = x.copy() - self.assertEqual(str(x), str(y)) + assert str(x) == str(y) def test_change_component(self) -> None: """ @@ -120,14 +122,14 @@ class Test(unittest.TestCase): x = Vector([1, 0, 0]) x.change_component(0, 0) x.change_component(1, 1) - self.assertEqual(str(x), "(0,1,0)") + assert str(x) == "(0,1,0)" def test_str_matrix(self) -> None: """ test for Matrix method str() """ a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3) - self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a)) + assert str(a) == "|1,2,3|\n|2,4,5|\n|6,7,8|\n" def test_minor(self) -> None: """ @@ -137,7 +139,7 @@ class Test(unittest.TestCase): minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height()): for y in range(a.width()): - self.assertEqual(minors[x][y], a.minor(x, y)) + assert minors[x][y] == a.minor(x, y) def test_cofactor(self) -> None: """ @@ -147,14 +149,14 @@ 
class Test(unittest.TestCase):
         cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
         for x in range(a.height()):
             for y in range(a.width()):
-                self.assertEqual(cofactors[x][y], a.cofactor(x, y))
+                assert cofactors[x][y] == a.cofactor(x, y)
 
     def test_determinant(self) -> None:
         """
         test for Matrix method determinant()
         """
         a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
-        self.assertEqual(-5, a.determinant())
+        assert a.determinant() == -5
 
     def test__mul__matrix(self) -> None:
         """
@@ -162,8 +164,8 @@ class Test(unittest.TestCase):
         """
         a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
         x = Vector([1, 2, 3])
-        self.assertEqual("(14,32,50)", str(a * x))
-        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))
+        assert str(a * x) == "(14,32,50)"
+        assert str(a * 2) == "|2,4,6|\n|8,10,12|\n|14,16,18|\n"
 
     def test_change_component_matrix(self) -> None:
         """
@@ -171,14 +173,14 @@ class Test(unittest.TestCase):
         """
         a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
         a.change_component(0, 2, 5)
-        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))
+        assert str(a) == "|1,2,5|\n|2,4,5|\n|6,7,8|\n"
 
     def test_component_matrix(self) -> None:
         """
         test for Matrix method component()
         """
         a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
-        self.assertEqual(7, a.component(2, 1), 0.01)
+        assert a.component(2, 1) == 7
 
     def test__add__matrix(self) -> None:
         """
@@ -186,7 +188,7 @@ class Test(unittest.TestCase):
         """
         a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
         b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
-        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))
+        assert str(a + b) == "|2,4,10|\n|4,8,10|\n|12,14,18|\n"
 
     def test__sub__matrix(self) -> None:
         """
@@ -194,15 +196,14 @@ class Test(unittest.TestCase):
         """
         a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
         b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
-        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))
+        assert str(a - b) == "|0,0,-4|\n|0,0,0|\n|0,0,-2|\n"
 
     def 
test_square_zero_matrix(self) -> None: """ test for global function square_zero_matrix() """ - self.assertEqual( - "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", - str(square_zero_matrix(5)), + assert str(square_zero_matrix(5)) == ( + "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" ) diff --git a/machine_learning/dimensionality_reduction.py b/machine_learning/dimensionality_reduction.py index d2046f81a..50d442ecc 100644 --- a/machine_learning/dimensionality_reduction.py +++ b/machine_learning/dimensionality_reduction.py @@ -169,7 +169,7 @@ def test_linear_discriminant_analysis() -> None: dimensions = 2 # Assert that the function raises an AssertionError if dimensions > classes - with pytest.raises(AssertionError) as error_info: + with pytest.raises(AssertionError) as error_info: # noqa: PT012 projected_data = linear_discriminant_analysis( features, labels, classes, dimensions ) @@ -185,7 +185,7 @@ def test_principal_component_analysis() -> None: dimensions = 2 expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]]) - with pytest.raises(AssertionError) as error_info: + with pytest.raises(AssertionError) as error_info: # noqa: PT012 output = principal_component_analysis(features, dimensions) if not np.allclose(expected_output, output): raise AssertionError diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index 3fe151442..ebad66ac8 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -128,7 +128,7 @@ def plot_heterogeneity(heterogeneity, k): def kmeans( data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False ): - """This function runs k-means on given data and initial set of centroids. + """Runs k-means on given data and initial set of centroids. 
maxiter: maximum number of iterations to run.(default=500) record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations @@ -195,20 +195,20 @@ if False: # change to true to run this test case. def report_generator( - df: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None + predicted: pd.DataFrame, clustering_variables: np.ndarray, fill_missing_report=None ) -> pd.DataFrame: """ - Generates a clustering report. This function takes 2 arguments as input: - df - dataframe with predicted cluster column + Generate a clustering report given these two arguments: + predicted - dataframe with predicted cluster column fill_missing_report - dictionary of rules on how we are going to fill in missing values for final generated report (not included in modelling); - >>> data = pd.DataFrame() - >>> data['numbers'] = [1, 2, 3] - >>> data['col1'] = [0.5, 2.5, 4.5] - >>> data['col2'] = [100, 200, 300] - >>> data['col3'] = [10, 20, 30] - >>> data['Cluster'] = [1, 1, 2] - >>> report_generator(data, ['col1', 'col2'], 0) + >>> predicted = pd.DataFrame() + >>> predicted['numbers'] = [1, 2, 3] + >>> predicted['col1'] = [0.5, 2.5, 4.5] + >>> predicted['col2'] = [100, 200, 300] + >>> predicted['col3'] = [10, 20, 30] + >>> predicted['Cluster'] = [1, 1, 2] + >>> report_generator(predicted, ['col1', 'col2'], 0) Features Type Mark 1 2 0 # of Customers ClusterSize False 2.000000 1.000000 1 % of Customers ClusterProportion False 0.666667 0.333333 @@ -226,11 +226,11 @@ def report_generator( """ # Fill missing values with given rules if fill_missing_report: - df = df.fillna(value=fill_missing_report) - df["dummy"] = 1 - numeric_cols = df.select_dtypes(np.number).columns + predicted = predicted.fillna(value=fill_missing_report) + predicted["dummy"] = 1 + numeric_cols = predicted.select_dtypes(np.number).columns report = ( - df.groupby(["Cluster"])[ # construct report dataframe + predicted.groupby(["Cluster"])[ # construct report 
dataframe numeric_cols ] # group by cluster number .agg( @@ -267,46 +267,43 @@ def report_generator( .rename(index=str, columns={"level_0": "Features", "level_1": "Type"}) ) # rename columns # calculate the size of cluster(count of clientID's) + # avoid SettingWithCopyWarning clustersize = report[ (report["Features"] == "dummy") & (report["Type"] == "count") - ].copy() # avoid SettingWithCopyWarning - clustersize.Type = ( - "ClusterSize" # rename created cluster df to match report column names - ) + ].copy() + # rename created predicted cluster to match report column names + clustersize.Type = "ClusterSize" clustersize.Features = "# of Customers" + # calculating the proportion of cluster clusterproportion = pd.DataFrame( - clustersize.iloc[:, 2:].values - / clustersize.iloc[:, 2:].values.sum() # calculating the proportion of cluster + clustersize.iloc[:, 2:].to_numpy() / clustersize.iloc[:, 2:].to_numpy().sum() ) - clusterproportion[ - "Type" - ] = "% of Customers" # rename created cluster df to match report column names + # rename created predicted cluster to match report column names + clusterproportion["Type"] = "% of Customers" clusterproportion["Features"] = "ClusterProportion" cols = clusterproportion.columns.tolist() cols = cols[-2:] + cols[:-2] clusterproportion = clusterproportion[cols] # rearrange columns to match report clusterproportion.columns = report.columns + # generating dataframe with count of nan values a = pd.DataFrame( abs( - report[report["Type"] == "count"].iloc[:, 2:].values - - clustersize.iloc[:, 2:].values + report[report["Type"] == "count"].iloc[:, 2:].to_numpy() + - clustersize.iloc[:, 2:].to_numpy() ) - ) # generating df with count of nan values + ) a["Features"] = 0 a["Type"] = "# of nan" - a.Features = report[ - report["Type"] == "count" - ].Features.tolist() # filling values in order to match report + # filling values in order to match report + a.Features = report[report["Type"] == "count"].Features.tolist() cols = 
a.columns.tolist() cols = cols[-2:] + cols[:-2] a = a[cols] # rearrange columns to match report a.columns = report.columns # rename columns to match report - report = report.drop( - report[report.Type == "count"].index - ) # drop count values except for cluster size - report = pd.concat( - [report, a, clustersize, clusterproportion], axis=0 - ) # concat report with cluster size and nan values + # drop count values except for cluster size + report = report.drop(report[report.Type == "count"].index) + # concat report with cluster size and nan values + report = pd.concat([report, a, clustersize, clusterproportion], axis=0) report["Mark"] = report["Features"].isin(clustering_variables) cols = report.columns.tolist() cols = cols[0:2] + cols[-1:] + cols[2:-1] diff --git a/maths/least_common_multiple.py b/maths/least_common_multiple.py index 4f28da8ab..a5c4bf8e3 100644 --- a/maths/least_common_multiple.py +++ b/maths/least_common_multiple.py @@ -67,8 +67,8 @@ class TestLeastCommonMultiple(unittest.TestCase): slow_result = least_common_multiple_slow(first_num, second_num) fast_result = least_common_multiple_fast(first_num, second_num) with self.subTest(i=i): - self.assertEqual(slow_result, self.expected_results[i]) - self.assertEqual(fast_result, self.expected_results[i]) + assert slow_result == self.expected_results[i] + assert fast_result == self.expected_results[i] if __name__ == "__main__": diff --git a/maths/modular_division.py b/maths/modular_division.py index a9d0f65c5..260d56837 100644 --- a/maths/modular_division.py +++ b/maths/modular_division.py @@ -28,7 +28,9 @@ def modular_division(a: int, b: int, n: int) -> int: 4 """ - assert n > 1 and a > 0 and greatest_common_divisor(a, n) == 1 + assert n > 1 + assert a > 0 + assert greatest_common_divisor(a, n) == 1 (d, t, s) = extended_gcd(n, a) # Implemented below x = (b * s) % n return x @@ -86,7 +88,8 @@ def extended_gcd(a: int, b: int) -> tuple[int, int, int]: ** extended_gcd function is used when d = gcd(a,b) is 
required in output """ - assert a >= 0 and b >= 0 + assert a >= 0 + assert b >= 0 if b == 0: d, x, y = a, 1, 0 @@ -95,7 +98,8 @@ def extended_gcd(a: int, b: int) -> tuple[int, int, int]: x = q y = p - q * (a // b) - assert a % d == 0 and b % d == 0 + assert a % d == 0 + assert b % d == 0 assert d == a * x + b * y return (d, x, y) diff --git a/maths/prime_check.py b/maths/prime_check.py index 80ab8bc5d..c17877a57 100644 --- a/maths/prime_check.py +++ b/maths/prime_check.py @@ -3,6 +3,8 @@ import math import unittest +import pytest + def is_prime(number: int) -> bool: """Checks to see if a number is a prime in O(sqrt(n)). @@ -50,33 +52,31 @@ def is_prime(number: int) -> bool: class Test(unittest.TestCase): def test_primes(self): - self.assertTrue(is_prime(2)) - self.assertTrue(is_prime(3)) - self.assertTrue(is_prime(5)) - self.assertTrue(is_prime(7)) - self.assertTrue(is_prime(11)) - self.assertTrue(is_prime(13)) - self.assertTrue(is_prime(17)) - self.assertTrue(is_prime(19)) - self.assertTrue(is_prime(23)) - self.assertTrue(is_prime(29)) + assert is_prime(2) + assert is_prime(3) + assert is_prime(5) + assert is_prime(7) + assert is_prime(11) + assert is_prime(13) + assert is_prime(17) + assert is_prime(19) + assert is_prime(23) + assert is_prime(29) def test_not_primes(self): - with self.assertRaises(AssertionError): + with pytest.raises(AssertionError): is_prime(-19) - self.assertFalse( - is_prime(0), - "Zero doesn't have any positive factors, primes must have exactly two.", - ) - self.assertFalse( - is_prime(1), - "One only has 1 positive factor, primes must have exactly two.", - ) - self.assertFalse(is_prime(2 * 2)) - self.assertFalse(is_prime(2 * 3)) - self.assertFalse(is_prime(3 * 3)) - self.assertFalse(is_prime(3 * 5)) - self.assertFalse(is_prime(3 * 5 * 7)) + assert not is_prime( + 0 + ), "Zero doesn't have any positive factors, primes must have exactly two." + assert not is_prime( + 1 + ), "One only has 1 positive factor, primes must have exactly two." 
+ assert not is_prime(2 * 2) + assert not is_prime(2 * 3) + assert not is_prime(3 * 3) + assert not is_prime(3 * 5) + assert not is_prime(3 * 5 * 7) if __name__ == "__main__": diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index b6e50f70f..7f10ae706 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -114,7 +114,8 @@ class Matrix: # Validation assert isinstance(another, Matrix) - assert self.row == another.row and self.column == another.column + assert self.row == another.row + assert self.column == another.column # Add result = Matrix(self.row, self.column) @@ -225,7 +226,8 @@ class Matrix: """ # Size validation - assert isinstance(u, Matrix) and isinstance(v, Matrix) + assert isinstance(u, Matrix) + assert isinstance(v, Matrix) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector diff --git a/matrix/tests/test_matrix_operation.py b/matrix/tests/test_matrix_operation.py index 65b35fd7e..638f97daa 100644 --- a/matrix/tests/test_matrix_operation.py +++ b/matrix/tests/test_matrix_operation.py @@ -31,14 +31,14 @@ stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) -@pytest.mark.mat_ops +@pytest.mark.mat_ops() @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) def test_addition(mat1, mat2): if (np.array(mat1)).shape < (2, 2) or (np.array(mat2)).shape < (2, 2): + logger.info(f"\n\t{test_addition.__name__} returned integer") with pytest.raises(TypeError): - logger.info(f"\n\t{test_addition.__name__} returned integer") matop.add(mat1, mat2) elif (np.array(mat1)).shape == (np.array(mat2)).shape: logger.info(f"\n\t{test_addition.__name__} with same matrix dims") @@ -46,19 +46,19 @@ def test_addition(mat1, mat2): theo = matop.add(mat1, mat2) assert theo == act else: + logger.info(f"\n\t{test_addition.__name__} with different matrix dims") with 
pytest.raises(ValueError): - logger.info(f"\n\t{test_addition.__name__} with different matrix dims") matop.add(mat1, mat2) -@pytest.mark.mat_ops +@pytest.mark.mat_ops() @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) def test_subtraction(mat1, mat2): if (np.array(mat1)).shape < (2, 2) or (np.array(mat2)).shape < (2, 2): + logger.info(f"\n\t{test_subtraction.__name__} returned integer") with pytest.raises(TypeError): - logger.info(f"\n\t{test_subtraction.__name__} returned integer") matop.subtract(mat1, mat2) elif (np.array(mat1)).shape == (np.array(mat2)).shape: logger.info(f"\n\t{test_subtraction.__name__} with same matrix dims") @@ -66,12 +66,12 @@ def test_subtraction(mat1, mat2): theo = matop.subtract(mat1, mat2) assert theo == act else: + logger.info(f"\n\t{test_subtraction.__name__} with different matrix dims") with pytest.raises(ValueError): - logger.info(f"\n\t{test_subtraction.__name__} with different matrix dims") assert matop.subtract(mat1, mat2) -@pytest.mark.mat_ops +@pytest.mark.mat_ops() @pytest.mark.parametrize( ("mat1", "mat2"), [(mat_a, mat_b), (mat_c, mat_d), (mat_d, mat_e), (mat_f, mat_h)] ) @@ -86,33 +86,33 @@ def test_multiplication(mat1, mat2): theo = matop.multiply(mat1, mat2) assert theo == act else: + logger.info( + f"\n\t{test_multiplication.__name__} does not meet dim requirements" + ) with pytest.raises(ValueError): - logger.info( - f"\n\t{test_multiplication.__name__} does not meet dim requirements" - ) assert matop.subtract(mat1, mat2) -@pytest.mark.mat_ops +@pytest.mark.mat_ops() def test_scalar_multiply(): act = (3.5 * np.array(mat_a)).tolist() theo = matop.scalar_multiply(mat_a, 3.5) assert theo == act -@pytest.mark.mat_ops +@pytest.mark.mat_ops() def test_identity(): act = (np.identity(5)).tolist() theo = matop.identity(5) assert theo == act -@pytest.mark.mat_ops +@pytest.mark.mat_ops() @pytest.mark.parametrize("mat", [mat_a, mat_b, mat_c, mat_d, mat_e, mat_f]) def 
test_transpose(mat): if (np.array(mat)).shape < (2, 2): + logger.info(f"\n\t{test_transpose.__name__} returned integer") with pytest.raises(TypeError): - logger.info(f"\n\t{test_transpose.__name__} returned integer") matop.transpose(mat) else: act = (np.transpose(mat)).tolist() diff --git a/project_euler/problem_054/test_poker_hand.py b/project_euler/problem_054/test_poker_hand.py index 5735bfc37..ba5e0c8a2 100644 --- a/project_euler/problem_054/test_poker_hand.py +++ b/project_euler/problem_054/test_poker_hand.py @@ -147,39 +147,39 @@ def generate_random_hands(number_of_hands: int = 100): return (generate_random_hand() for _ in range(number_of_hands)) -@pytest.mark.parametrize("hand, expected", TEST_FLUSH) +@pytest.mark.parametrize(("hand", "expected"), TEST_FLUSH) def test_hand_is_flush(hand, expected): assert PokerHand(hand)._is_flush() == expected -@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT) +@pytest.mark.parametrize(("hand", "expected"), TEST_STRAIGHT) def test_hand_is_straight(hand, expected): assert PokerHand(hand)._is_straight() == expected -@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT) +@pytest.mark.parametrize(("hand", "expected", "card_values"), TEST_FIVE_HIGH_STRAIGHT) def test_hand_is_five_high_straight(hand, expected, card_values): player = PokerHand(hand) assert player._is_five_high_straight() == expected assert player._card_values == card_values -@pytest.mark.parametrize("hand, expected", TEST_KIND) +@pytest.mark.parametrize(("hand", "expected"), TEST_KIND) def test_hand_is_same_kind(hand, expected): assert PokerHand(hand)._is_same_kind() == expected -@pytest.mark.parametrize("hand, expected", TEST_TYPES) +@pytest.mark.parametrize(("hand", "expected"), TEST_TYPES) def test_hand_values(hand, expected): assert PokerHand(hand)._hand_type == expected -@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE) +@pytest.mark.parametrize(("hand", "other", "expected"), TEST_COMPARE) def 
test_compare_simple(hand, other, expected): assert PokerHand(hand).compare_with(PokerHand(other)) == expected -@pytest.mark.parametrize("hand, other, expected", generate_random_hands()) +@pytest.mark.parametrize(("hand", "other", "expected"), generate_random_hands()) def test_compare_random(hand, other, expected): assert PokerHand(hand).compare_with(PokerHand(other)) == expected diff --git a/pyproject.toml b/pyproject.toml index 75da7a045..fe5f2f09c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,6 +19,8 @@ ignore = [ # `ruff rule S101` for a description of that rule "PLW0120", # `else` clause on loop without a `break` statement -- FIX ME "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "PT011", # `pytest.raises(Exception)` is too broad, set the `match` parameter or use a more specific exception + "PT018", # Assertion should be broken down into multiple parts "RUF00", # Ambiguous unicode character and other rules "RUF100", # Unused `noqa` directive -- FIX ME "S101", # Use of `assert` detected -- DO NOT FIX @@ -37,6 +39,7 @@ select = [ # https://beta.ruff.rs/docs/rules "BLE", # flake8-blind-except "C4", # flake8-comprehensions "C90", # McCabe cyclomatic complexity + "DJ", # flake8-django "DTZ", # flake8-datetimez "E", # pycodestyle "EM", # flake8-errmsg @@ -52,9 +55,11 @@ select = [ # https://beta.ruff.rs/docs/rules "ISC", # flake8-implicit-str-concat "N", # pep8-naming "NPY", # NumPy-specific rules + "PD", # pandas-vet "PGH", # pygrep-hooks "PIE", # flake8-pie "PL", # Pylint + "PT", # flake8-pytest-style "PYI", # flake8-pyi "RSE", # flake8-raise "RUF", # Ruff-specific rules @@ -70,11 +75,8 @@ select = [ # https://beta.ruff.rs/docs/rules # "ANN", # flake8-annotations # FIX ME? # "COM", # flake8-commas # "D", # pydocstyle -- FIX ME? 
- # "DJ", # flake8-django # "ERA", # eradicate -- DO NOT FIX # "FBT", # flake8-boolean-trap # FIX ME - # "PD", # pandas-vet - # "PT", # flake8-pytest-style # "PTH", # flake8-use-pathlib # FIX ME # "Q", # flake8-quotes # "RET", # flake8-return # FIX ME? diff --git a/strings/knuth_morris_pratt.py b/strings/knuth_morris_pratt.py index 8a04eb253..5120779c5 100644 --- a/strings/knuth_morris_pratt.py +++ b/strings/knuth_morris_pratt.py @@ -71,7 +71,8 @@ if __name__ == "__main__": pattern = "abc1abc12" text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc" text2 = "alskfjaldsk23adsfabcabc" - assert knuth_morris_pratt(text1, pattern) and knuth_morris_pratt(text2, pattern) + assert knuth_morris_pratt(text1, pattern) + assert knuth_morris_pratt(text2, pattern) # Test 2) pattern = "ABABX" diff --git a/strings/rabin_karp.py b/strings/rabin_karp.py index 532c689f8..9c0d0fe5c 100644 --- a/strings/rabin_karp.py +++ b/strings/rabin_karp.py @@ -60,7 +60,8 @@ def test_rabin_karp() -> None: pattern = "abc1abc12" text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc" text2 = "alskfjaldsk23adsfabcabc" - assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2) + assert rabin_karp(pattern, text1) + assert not rabin_karp(pattern, text2) # Test 2) pattern = "ABABX" From 92fbe60082b782d8b85e9667bd6d7832b5383fa3 Mon Sep 17 00:00:00 2001 From: Vipin Karthic <143083087+vipinkarthic@users.noreply.github.com> Date: Thu, 12 Oct 2023 00:35:24 +0530 Subject: [PATCH 097/306] Added doctests to carmichael_number.py (#10210) Co-authored-by: Tianyi Zheng --- maths/carmichael_number.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/maths/carmichael_number.py b/maths/carmichael_number.py index 08b5c70e8..c73908545 100644 --- a/maths/carmichael_number.py +++ b/maths/carmichael_number.py @@ -16,11 +16,9 @@ from maths.greatest_common_divisor import greatest_common_divisor def power(x: int, y: int, mod: int) -> int: """ - Examples: >>> power(2, 15, 3) 2 - >>> power(5, 1, 30) 5 
""" @@ -36,14 +34,19 @@ def power(x: int, y: int, mod: int) -> int: def is_carmichael_number(n: int) -> bool: """ - Examples: - >>> is_carmichael_number(562) + >>> is_carmichael_number(4) False - >>> is_carmichael_number(561) True - + >>> is_carmichael_number(562) + False + >>> is_carmichael_number(900) + False + >>> is_carmichael_number(1105) + True + >>> is_carmichael_number(8911) + True >>> is_carmichael_number(5.1) Traceback (most recent call last): ... From 09ce6b23d7529aa0e02a6b5cfef1a9b831a3c9ad Mon Sep 17 00:00:00 2001 From: Siddharth Warrier <117698635+siddwarr@users.noreply.github.com> Date: Thu, 12 Oct 2023 14:38:55 +0530 Subject: [PATCH 098/306] Count pairs with given sum (#10282) * added power_of_4 * deleted power_of_4 * added pairs_with_given_sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated the comment * updated return hint * updated type hints * updated the variable * updated annotation * updated code * updated code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added the problem link and used defaultdict * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * corrected import formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pairs_with_given_sum.py * Update data_structures/arrays/pairs_with_given_sum.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../arrays/pairs_with_given_sum.py | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 data_structures/arrays/pairs_with_given_sum.py diff --git a/data_structures/arrays/pairs_with_given_sum.py b/data_structures/arrays/pairs_with_given_sum.py new file mode 100644 index 000000000..c4a5ceeae --- /dev/null +++ 
b/data_structures/arrays/pairs_with_given_sum.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 + +""" +Given an array of integers and an integer req_sum, find the number of pairs of array +elements whose sum is equal to req_sum. + +https://practice.geeksforgeeks.org/problems/count-pairs-with-given-sum5022/0 +""" +from itertools import combinations + + +def pairs_with_sum(arr: list, req_sum: int) -> int: + """ + Return the no. of pairs with sum "sum" + >>> pairs_with_sum([1, 5, 7, 1], 6) + 2 + >>> pairs_with_sum([1, 1, 1, 1, 1, 1, 1, 1], 2) + 28 + >>> pairs_with_sum([1, 7, 6, 2, 5, 4, 3, 1, 9, 8], 7) + 4 + """ + return len([1 for a, b in combinations(arr, 2) if a + b == req_sum]) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 7ea812996c8ee1fa2eb9fbc72b7caaae8eb8ff0e Mon Sep 17 00:00:00 2001 From: Poojan Smart <44301271+PoojanSmart@users.noreply.github.com> Date: Thu, 12 Oct 2023 19:24:07 +0530 Subject: [PATCH 099/306] Adds exponential moving average algorithm (#10273) * Adds exponential moving average algorithm * code clean up * spell correction * Modifies I/O types of function * Replaces generator function * Resolved mypy type error * readibility of code and documentation * Update exponential_moving_average.py --------- Co-authored-by: Christian Clauss --- financial/exponential_moving_average.py | 73 +++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 financial/exponential_moving_average.py diff --git a/financial/exponential_moving_average.py b/financial/exponential_moving_average.py new file mode 100644 index 000000000..0b6cea3b4 --- /dev/null +++ b/financial/exponential_moving_average.py @@ -0,0 +1,73 @@ +""" + Calculate the exponential moving average (EMA) on the series of stock prices. 
+ Wikipedia Reference: https://en.wikipedia.org/wiki/Exponential_smoothing + https://www.investopedia.com/terms/e/ema.asp#toc-what-is-an-exponential + -moving-average-ema + + Exponential moving average is used in finance to analyze changes stock prices. + EMA is used in conjunction with Simple moving average (SMA), EMA reacts to the + changes in the value quicker than SMA, which is one of the advantages of using EMA. +""" + +from collections.abc import Iterator + + +def exponential_moving_average( + stock_prices: Iterator[float], window_size: int +) -> Iterator[float]: + """ + Yields exponential moving averages of the given stock prices. + >>> tuple(exponential_moving_average(iter([2, 5, 3, 8.2, 6, 9, 10]), 3)) + (2, 3.5, 3.25, 5.725, 5.8625, 7.43125, 8.715625) + + :param stock_prices: A stream of stock prices + :param window_size: The number of stock prices that will trigger a new calculation + of the exponential average (window_size > 0) + :return: Yields a sequence of exponential moving averages + + Formula: + + st = alpha * xt + (1 - alpha) * st_prev + + Where, + st : Exponential moving average at timestamp t + xt : stock price in from the stock prices at timestamp t + st_prev : Exponential moving average at timestamp t-1 + alpha : 2/(1 + window_size) - smoothing factor + + Exponential moving average (EMA) is a rule of thumb technique for + smoothing time series data using an exponential window function. 
+ """ + + if window_size <= 0: + raise ValueError("window_size must be > 0") + + # Calculating smoothing factor + alpha = 2 / (1 + window_size) + + # Exponential average at timestamp t + moving_average = 0.0 + + for i, stock_price in enumerate(stock_prices): + if i <= window_size: + # Assigning simple moving average till the window_size for the first time + # is reached + moving_average = (moving_average + stock_price) * 0.5 if i else stock_price + else: + # Calculating exponential moving average based on current timestamp data + # point and previous exponential average value + moving_average = (alpha * stock_price) + ((1 - alpha) * moving_average) + yield moving_average + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + stock_prices = [2.0, 5, 3, 8.2, 6, 9, 10] + window_size = 3 + result = tuple(exponential_moving_average(iter(stock_prices), window_size)) + print(f"{stock_prices = }") + print(f"{window_size = }") + print(f"{result = }") From ecf21bfc87c1d1cd4730e628279b609151bc6c57 Mon Sep 17 00:00:00 2001 From: Daniela Large <133594563+dannylarge144@users.noreply.github.com> Date: Thu, 12 Oct 2023 16:51:06 +0100 Subject: [PATCH 100/306] Added imply gate to boolean algebra (#9849) * Add files via upload * Update imply_gate.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update boolean_algebra/imply_gate.py Co-authored-by: Tianyi Zheng * Update imply_gate.py Made changes requested * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update imply_gate.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- boolean_algebra/imply_gate.py | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 boolean_algebra/imply_gate.py diff --git a/boolean_algebra/imply_gate.py b/boolean_algebra/imply_gate.py new file mode 100644 
index 000000000..151a7ad64 --- /dev/null +++ b/boolean_algebra/imply_gate.py @@ -0,0 +1,40 @@ +""" +An IMPLY Gate is a logic gate in boolean algebra which results to 1 if +either input 1 is 0, or if input 1 is 1, then the output is 1 only if input 2 is 1. +It is true if input 1 implies input 2. + +Following is the truth table of an IMPLY Gate: + ------------------------------ + | Input 1 | Input 2 | Output | + ------------------------------ + | 0 | 0 | 1 | + | 0 | 1 | 1 | + | 1 | 0 | 0 | + | 1 | 1 | 1 | + ------------------------------ + +Refer - https://en.wikipedia.org/wiki/IMPLY_gate +""" + + +def imply_gate(input_1: int, input_2: int) -> int: + """ + Calculate IMPLY of the input values + + >>> imply_gate(0, 0) + 1 + >>> imply_gate(0, 1) + 1 + >>> imply_gate(1, 0) + 0 + >>> imply_gate(1, 1) + 1 + """ + return int(input_1 == 0 or input_2 == 1) + + +if __name__ == "__main__": + print(imply_gate(0, 0)) + print(imply_gate(0, 1)) + print(imply_gate(1, 0)) + print(imply_gate(1, 1)) From b94cdbab1a7f3793e63526cd29a8f415ff0b55ac Mon Sep 17 00:00:00 2001 From: Pranavkumar Mallela <87595299+pranav-mallela@users.noreply.github.com> Date: Fri, 13 Oct 2023 01:21:53 +0530 Subject: [PATCH 101/306] add find triplets with 0 sum (3sum) (#10040) * add find triplets with 0 sum (3sum) * Update find_triplets_with_0_sum.py * Update find_triplets_with_0_sum.py --------- Co-authored-by: Christian Clauss --- .../arrays/find_triplets_with_0_sum.py | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 data_structures/arrays/find_triplets_with_0_sum.py diff --git a/data_structures/arrays/find_triplets_with_0_sum.py b/data_structures/arrays/find_triplets_with_0_sum.py new file mode 100644 index 000000000..8217ff857 --- /dev/null +++ b/data_structures/arrays/find_triplets_with_0_sum.py @@ -0,0 +1,24 @@ +from itertools import combinations + + +def find_triplets_with_0_sum(nums: list[int]) -> list[list[int]]: + """ + Given a list of integers, return elements a, b, c such 
that a + b + c = 0. + Args: + nums: list of integers + Returns: + list of lists of integers where sum(each_list) == 0 + Examples: + >>> find_triplets_with_0_sum([-1, 0, 1, 2, -1, -4]) + [[-1, -1, 2], [-1, 0, 1]] + >>> find_triplets_with_0_sum([]) + [] + >>> find_triplets_with_0_sum([0, 0, 0]) + [[0, 0, 0]] + >>> find_triplets_with_0_sum([1, 2, 3, 0, -1, -2, -3]) + [[-3, 0, 3], [-3, 1, 2], [-2, -1, 3], [-2, 0, 2], [-1, 0, 1]] + """ + return [ + list(x) + for x in sorted({abc for abc in combinations(sorted(nums), 3) if not sum(abc)}) + ] From 24f6f8c137a6ba9784c06da3694a1d36781b7a88 Mon Sep 17 00:00:00 2001 From: Daniela Large <133594563+dannylarge144@users.noreply.github.com> Date: Fri, 13 Oct 2023 05:29:39 +0100 Subject: [PATCH 102/306] Added nimply gate to boolean_algebra (#10344) * Add files via upload * Update imply_gate.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update boolean_algebra/imply_gate.py Co-authored-by: Tianyi Zheng * Update imply_gate.py Made changes requested * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update imply_gate.py * Added nimply gate * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- boolean_algebra/nimply_gate.py | 40 ++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 boolean_algebra/nimply_gate.py diff --git a/boolean_algebra/nimply_gate.py b/boolean_algebra/nimply_gate.py new file mode 100644 index 000000000..6e34332d9 --- /dev/null +++ b/boolean_algebra/nimply_gate.py @@ -0,0 +1,40 @@ +""" +An NIMPLY Gate is a logic gate in boolean algebra which results to 0 if +either input 1 is 0, or if input 1 is 1, then it is 0 only if input 2 is 1. +It is false if input 1 implies input 2. 
It is the negated form of imply + +Following is the truth table of an NIMPLY Gate: + ------------------------------ + | Input 1 | Input 2 | Output | + ------------------------------ + | 0 | 0 | 0 | + | 0 | 1 | 0 | + | 1 | 0 | 1 | + | 1 | 1 | 0 | + ------------------------------ + +Refer - https://en.wikipedia.org/wiki/NIMPLY_gate +""" + + +def nimply_gate(input_1: int, input_2: int) -> int: + """ + Calculate NIMPLY of the input values + + >>> nimply_gate(0, 0) + 0 + >>> nimply_gate(0, 1) + 0 + >>> nimply_gate(1, 0) + 1 + >>> nimply_gate(1, 1) + 0 + """ + return int(input_1 == 1 and input_2 == 0) + + +if __name__ == "__main__": + print(nimply_gate(0, 0)) + print(nimply_gate(0, 1)) + print(nimply_gate(1, 0)) + print(nimply_gate(1, 1)) From ebe66935d2842a0e0cbea58dcc647428f357f15e Mon Sep 17 00:00:00 2001 From: Saahil Mahato <115351000+saahil-mahato@users.noreply.github.com> Date: Fri, 13 Oct 2023 11:49:48 +0545 Subject: [PATCH 103/306] Add Solovay-Strassen Primality test (#10335) * Add Solovay-Strassen Primality test * fix: resolve comments * refactor: docs change --- maths/solovay_strassen_primality_test.py | 107 +++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 maths/solovay_strassen_primality_test.py diff --git a/maths/solovay_strassen_primality_test.py b/maths/solovay_strassen_primality_test.py new file mode 100644 index 000000000..1d11d4583 --- /dev/null +++ b/maths/solovay_strassen_primality_test.py @@ -0,0 +1,107 @@ +""" +This script implements the Solovay-Strassen Primality test. + +This probabilistic primality test is based on Euler's criterion. It is similar +to the Fermat test but uses quadratic residues. It can quickly identify +composite numbers but may occasionally classify composite numbers as prime. 
+ +More details and concepts about this can be found on: +https://en.wikipedia.org/wiki/Solovay%E2%80%93Strassen_primality_test +""" + + +import random + + +def jacobi_symbol(random_a: int, number: int) -> int: + """ + Calculate the Jacobi symbol. The Jacobi symbol is a generalization + of the Legendre symbol, which can be used to simplify computations involving + quadratic residues. The Jacobi symbol is used in primality tests, like the + Solovay-Strassen test, because it helps determine if an integer is a + quadratic residue modulo a given modulus, providing valuable information + about the number's potential primality or compositeness. + + Parameters: + random_a: A randomly chosen integer from 2 to n-2 (inclusive) + number: The number that is tested for primality + + Returns: + jacobi_symbol: The Jacobi symbol is a mathematical function + used to determine whether an integer is a quadratic residue modulo + another integer (usually prime) or not. + + >>> jacobi_symbol(2, 13) + -1 + >>> jacobi_symbol(5, 19) + 1 + >>> jacobi_symbol(7, 14) + 0 + """ + + if random_a in (0, 1): + return random_a + + random_a %= number + t = 1 + + while random_a != 0: + while random_a % 2 == 0: + random_a //= 2 + r = number % 8 + if r in (3, 5): + t = -t + + random_a, number = number, random_a + + if random_a % 4 == number % 4 == 3: + t = -t + + random_a %= number + + return t if number == 1 else 0 + + +def solovay_strassen(number: int, iterations: int) -> bool: + """ + Check whether the input number is prime or not using + the Solovay-Strassen Primality test + + Parameters: + number: The number that is tested for primality + iterations: The number of times that the test is run + which effects the accuracy + + Returns: + result: True if number is probably prime and false + if not + + >>> random.seed(10) + >>> solovay_strassen(13, 5) + True + >>> solovay_strassen(9, 10) + False + >>> solovay_strassen(17, 15) + True + """ + + if number <= 1: + return False + if number <= 3: + return True 
+ + for _ in range(iterations): + a = random.randint(2, number - 2) + x = jacobi_symbol(a, number) + y = pow(a, (number - 1) // 2, number) + + if x == 0 or y != x % number: + return False + + return True + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c85506262d8fc6fcf154651ce8affdfb96b57ece Mon Sep 17 00:00:00 2001 From: Saahil Mahato <115351000+saahil-mahato@users.noreply.github.com> Date: Fri, 13 Oct 2023 19:03:52 +0545 Subject: [PATCH 104/306] Add Damerau-Levenshtein distance algorithm (#10159) * Add Damerau-Levenshtein distance algorithm * fix: precommit check * fix: doc correction * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * refactor: use variable for length and doc correction * Update damerau_levenshtein_distance.py * Update damerau_levenshtein_distance.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- strings/damerau_levenshtein_distance.py | 71 +++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 strings/damerau_levenshtein_distance.py diff --git a/strings/damerau_levenshtein_distance.py b/strings/damerau_levenshtein_distance.py new file mode 100644 index 000000000..72de01949 --- /dev/null +++ b/strings/damerau_levenshtein_distance.py @@ -0,0 +1,71 @@ +""" +This script is a implementation of the Damerau-Levenshtein distance algorithm. + +It's an algorithm that measures the edit distance between two string sequences + +More information about this algorithm can be found in this wikipedia article: +https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance +""" + + +def damerau_levenshtein_distance(first_string: str, second_string: str) -> int: + """ + Implements the Damerau-Levenshtein distance algorithm that measures + the edit distance between two strings. 
+ + Parameters: + first_string: The first string to compare + second_string: The second string to compare + + Returns: + distance: The edit distance between the first and second strings + + >>> damerau_levenshtein_distance("cat", "cut") + 1 + >>> damerau_levenshtein_distance("kitten", "sitting") + 3 + >>> damerau_levenshtein_distance("hello", "world") + 4 + >>> damerau_levenshtein_distance("book", "back") + 2 + >>> damerau_levenshtein_distance("container", "containment") + 3 + >>> damerau_levenshtein_distance("container", "containment") + 3 + """ + # Create a dynamic programming matrix to store the distances + dp_matrix = [[0] * (len(second_string) + 1) for _ in range(len(first_string) + 1)] + + # Initialize the matrix + for i in range(len(first_string) + 1): + dp_matrix[i][0] = i + for j in range(len(second_string) + 1): + dp_matrix[0][j] = j + + # Fill the matrix + for i, first_char in enumerate(first_string, start=1): + for j, second_char in enumerate(second_string, start=1): + cost = int(first_char != second_char) + + dp_matrix[i][j] = min( + dp_matrix[i - 1][j] + 1, # Deletion + dp_matrix[i][j - 1] + 1, # Insertion + dp_matrix[i - 1][j - 1] + cost, # Substitution + ) + + if ( + i > 1 + and j > 1 + and first_string[i - 1] == second_string[j - 2] + and first_string[i - 2] == second_string[j - 1] + ): + # Transposition + dp_matrix[i][j] = min(dp_matrix[i][j], dp_matrix[i - 2][j - 2] + cost) + + return dp_matrix[-1][-1] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 1117a50665b053ef7716cf1e80b29e11d30886c7 Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Fri, 13 Oct 2023 21:25:32 +0530 Subject: [PATCH 105/306] Modified comments on lower.py (#10369) --- strings/lower.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/strings/lower.py b/strings/lower.py index 9ae419123..49256b016 100644 --- a/strings/lower.py +++ b/strings/lower.py @@ -14,9 +14,9 @@ def 
lower(word: str) -> str: 'what' """ - # converting to ascii value int value and checking to see if char is a capital - # letter if it is a capital letter it is getting shift by 32 which makes it a lower - # case letter + # Converting to ASCII value, obtaining the integer representation + # and checking to see if the character is a capital letter. + # If it is a capital letter, it is shifted by 32, making it a lowercase letter. return "".join(chr(ord(char) + 32) if "A" <= char <= "Z" else char for char in word) From d96029e13d181229c692b8e4cafe2661cdae919e Mon Sep 17 00:00:00 2001 From: SalmanSi <114280969+SalmanSi@users.noreply.github.com> Date: Fri, 13 Oct 2023 22:48:31 +0500 Subject: [PATCH 106/306] added doctests for dynamicprogramming/minimum_partition (#10033) * added doctests * added doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add doctests to integer_partition.py * Update minimum_partition.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/integer_partition.py | 24 +++++++++++++++ dynamic_programming/minimum_partition.py | 38 ++++++++++++++++++++---- 2 files changed, 57 insertions(+), 5 deletions(-) diff --git a/dynamic_programming/integer_partition.py b/dynamic_programming/integer_partition.py index 8ed2e51bd..145bc29d0 100644 --- a/dynamic_programming/integer_partition.py +++ b/dynamic_programming/integer_partition.py @@ -3,10 +3,34 @@ The number of partitions of a number n into at least k parts equals the number o partitions into exactly k parts plus the number of partitions into at least k-1 parts. Subtracting 1 from each part of a partition of n into k parts gives a partition of n-k into k parts. These two facts together are used for this algorithm. 
+* https://en.wikipedia.org/wiki/Partition_(number_theory) +* https://en.wikipedia.org/wiki/Partition_function_(number_theory) """ def partition(m: int) -> int: + """ + >>> partition(5) + 7 + >>> partition(7) + 15 + >>> partition(100) + 190569292 + >>> partition(1_000) + 24061467864032622473692149727991 + >>> partition(-7) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> partition(0) + Traceback (most recent call last): + ... + IndexError: list assignment index out of range + >>> partition(7.8) + Traceback (most recent call last): + ... + TypeError: 'float' object cannot be interpreted as an integer + """ memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)] for i in range(m + 1): memo[i][0] = 1 diff --git a/dynamic_programming/minimum_partition.py b/dynamic_programming/minimum_partition.py index e6188cb33..748c0599e 100644 --- a/dynamic_programming/minimum_partition.py +++ b/dynamic_programming/minimum_partition.py @@ -3,7 +3,7 @@ Partition a set into two subsets such that the difference of subset sums is mini """ -def find_min(arr: list[int]) -> int: +def find_min(numbers: list[int]) -> int: """ >>> find_min([1, 2, 3, 4, 5]) 1 @@ -15,9 +15,37 @@ def find_min(arr: list[int]) -> int: 3 >>> find_min([]) 0 + >>> find_min([1, 2, 3, 4]) + 0 + >>> find_min([0, 0, 0, 0]) + 0 + >>> find_min([-1, -5, 5, 1]) + 0 + >>> find_min([-1, -5, 5, 1]) + 0 + >>> find_min([9, 9, 9, 9, 9]) + 9 + >>> find_min([1, 5, 10, 3]) + 1 + >>> find_min([-1, 0, 1]) + 0 + >>> find_min(range(10, 0, -1)) + 1 + >>> find_min([-1]) + Traceback (most recent call last): + -- + IndexError: list assignment index out of range + >>> find_min([0, 0, 0, 1, 2, -4]) + Traceback (most recent call last): + ... + IndexError: list assignment index out of range + >>> find_min([-1, -5, -10, -3]) + Traceback (most recent call last): + ... 
+ IndexError: list assignment index out of range """ - n = len(arr) - s = sum(arr) + n = len(numbers) + s = sum(numbers) dp = [[False for x in range(s + 1)] for y in range(n + 1)] @@ -31,8 +59,8 @@ def find_min(arr: list[int]) -> int: for j in range(1, s + 1): dp[i][j] = dp[i - 1][j] - if arr[i - 1] <= j: - dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]] + if numbers[i - 1] <= j: + dp[i][j] = dp[i][j] or dp[i - 1][j - numbers[i - 1]] for j in range(int(s / 2), -1, -1): if dp[n][j] is True: From 9fb0cd271efec0fc651a5143aedda42f3dc93ea8 Mon Sep 17 00:00:00 2001 From: Dale Dai <145884899+CouldNot@users.noreply.github.com> Date: Fri, 13 Oct 2023 23:47:08 -0700 Subject: [PATCH 107/306] Expand euler phi function doctest (#10401) --- maths/basic_maths.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/maths/basic_maths.py b/maths/basic_maths.py index 26c52c549..c9e3d00fa 100644 --- a/maths/basic_maths.py +++ b/maths/basic_maths.py @@ -98,7 +98,17 @@ def euler_phi(n: int) -> int: """Calculate Euler's Phi Function. >>> euler_phi(100) 40 + >>> euler_phi(0) + Traceback (most recent call last): + ... + ValueError: Only positive numbers are accepted + >>> euler_phi(-10) + Traceback (most recent call last): + ... 
+ ValueError: Only positive numbers are accepted """ + if n <= 0: + raise ValueError("Only positive numbers are accepted") s = n for x in set(prime_factors(n)): s *= (x - 1) / x From 0b2c9fb6f164468b51baa4866c1b8c4f01ec8b64 Mon Sep 17 00:00:00 2001 From: Baron105 <76466796+Baron105@users.noreply.github.com> Date: Sat, 14 Oct 2023 12:31:23 +0530 Subject: [PATCH 108/306] Adding avg and mps speed formulae for ideal gases (#10229) * avg and mps speed formulae added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * avg and mps speed formulae added * fixed_spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ws * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed * changed name of file and added code improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * issues fixed due to pi * requested changes added --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- physics/speeds_of_gas_molecules.py | 111 +++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 physics/speeds_of_gas_molecules.py diff --git a/physics/speeds_of_gas_molecules.py b/physics/speeds_of_gas_molecules.py new file mode 100644 index 000000000..a50d1c0f6 --- /dev/null +++ b/physics/speeds_of_gas_molecules.py @@ -0,0 +1,111 @@ +""" +The root-mean-square, average and most probable speeds of gas molecules are +derived from the 
Maxwell-Boltzmann distribution. The Maxwell-Boltzmann +distribution is a probability distribution that describes the distribution of +speeds of particles in an ideal gas. + +The distribution is given by the following equation: + + ------------------------------------------------- + | f(v) = (M/2πRT)^(3/2) * 4πv^2 * e^(-Mv^2/2RT) | + ------------------------------------------------- + +where: + f(v) is the fraction of molecules with a speed v + M is the molar mass of the gas in kg/mol + R is the gas constant + T is the absolute temperature + +More information about the Maxwell-Boltzmann distribution can be found here: +https://en.wikipedia.org/wiki/Maxwell%E2%80%93Boltzmann_distribution + +The average speed can be calculated by integrating the Maxwell-Boltzmann distribution +from 0 to infinity and dividing by the total number of molecules. The result is: + + --------------------- + | vavg = √(8RT/πM) | + --------------------- + +The most probable speed is the speed at which the Maxwell-Boltzmann distribution +is at its maximum. This can be found by differentiating the Maxwell-Boltzmann +distribution with respect to v and setting the result equal to zero. The result is: + + --------------------- + | vmp = √(2RT/M) | + --------------------- + +The root-mean-square speed is another measure of the average speed +of the molecules in a gas. It is calculated by taking the square root +of the average of the squares of the speeds of the molecules. The result is: + + --------------------- + | vrms = √(3RT/M) | + --------------------- + +Here we have defined functions to calculate the average and +most probable speeds of molecules in a gas given the +temperature and molar mass of the gas. 
+""" + +# import the constants R and pi from the scipy.constants library +from scipy.constants import R, pi + + +def avg_speed_of_molecule(temperature: float, molar_mass: float) -> float: + """ + Takes the temperature (in K) and molar mass (in kg/mol) of a gas + and returns the average speed of a molecule in the gas (in m/s). + + Examples: + >>> avg_speed_of_molecule(273, 0.028) # nitrogen at 273 K + 454.3488755020387 + >>> avg_speed_of_molecule(300, 0.032) # oxygen at 300 K + 445.52572733919885 + >>> avg_speed_of_molecule(-273, 0.028) # invalid temperature + Traceback (most recent call last): + ... + Exception: Absolute temperature cannot be less than 0 K + >>> avg_speed_of_molecule(273, 0) # invalid molar mass + Traceback (most recent call last): + ... + Exception: Molar mass should be greater than 0 kg/mol + """ + + if temperature < 0: + raise Exception("Absolute temperature cannot be less than 0 K") + if molar_mass <= 0: + raise Exception("Molar mass should be greater than 0 kg/mol") + return (8 * R * temperature / (pi * molar_mass)) ** 0.5 + + +def mps_speed_of_molecule(temperature: float, molar_mass: float) -> float: + """ + Takes the temperature (in K) and molar mass (in kg/mol) of a gas + and returns the most probable speed of a molecule in the gas (in m/s). + + Examples: + >>> mps_speed_of_molecule(273, 0.028) # nitrogen at 273 K + 402.65620701908966 + >>> mps_speed_of_molecule(300, 0.032) # oxygen at 300 K + 394.836895549922 + >>> mps_speed_of_molecule(-273, 0.028) # invalid temperature + Traceback (most recent call last): + ... + Exception: Absolute temperature cannot be less than 0 K + >>> mps_speed_of_molecule(273, 0) # invalid molar mass + Traceback (most recent call last): + ... 
+ Exception: Molar mass should be greater than 0 kg/mol + """ + + if temperature < 0: + raise Exception("Absolute temperature cannot be less than 0 K") + if molar_mass <= 0: + raise Exception("Molar mass should be greater than 0 kg/mol") + return (2 * R * temperature / molar_mass) ** 0.5 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 37cae3f56169348e97262b1b8f7671785be77a5b Mon Sep 17 00:00:00 2001 From: Muhammad Umer Farooq <115654418+Muhammadummerr@users.noreply.github.com> Date: Sat, 14 Oct 2023 13:31:43 +0500 Subject: [PATCH 109/306] Updated test cases of power_sum.py (#9978) * Updated test cases of power_sum.py * updated * updated. * remove extra comment and used ** instead of pow * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update backtracking/power_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- backtracking/power_sum.py | 42 +++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/backtracking/power_sum.py b/backtracking/power_sum.py index fcf1429f8..ee2eac426 100644 --- a/backtracking/power_sum.py +++ b/backtracking/power_sum.py @@ -6,8 +6,6 @@ We have to find all combinations of unique squares adding up to 13. The only solution is 2^2+3^2. Constraints: 1<=X<=1000, 2<=N<=10. 
""" -from math import pow - def backtrack( needed_sum: int, @@ -19,25 +17,25 @@ def backtrack( """ >>> backtrack(13, 2, 1, 0, 0) (0, 1) - >>> backtrack(100, 2, 1, 0, 0) - (0, 3) - >>> backtrack(100, 3, 1, 0, 0) + >>> backtrack(10, 2, 1, 0, 0) (0, 1) - >>> backtrack(800, 2, 1, 0, 0) - (0, 561) - >>> backtrack(1000, 10, 1, 0, 0) + >>> backtrack(10, 3, 1, 0, 0) (0, 0) - >>> backtrack(400, 2, 1, 0, 0) - (0, 55) - >>> backtrack(50, 1, 1, 0, 0) - (0, 3658) + >>> backtrack(20, 2, 1, 0, 0) + (0, 1) + >>> backtrack(15, 10, 1, 0, 0) + (0, 0) + >>> backtrack(16, 2, 1, 0, 0) + (0, 1) + >>> backtrack(20, 1, 1, 0, 0) + (0, 64) """ if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count - i_to_n = int(pow(current_number, power)) + i_to_n = current_number**power if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n @@ -57,17 +55,17 @@ def solve(needed_sum: int, power: int) -> int: """ >>> solve(13, 2) 1 - >>> solve(100, 2) - 3 - >>> solve(100, 3) + >>> solve(10, 2) 1 - >>> solve(800, 2) - 561 - >>> solve(1000, 10) + >>> solve(10, 3) 0 - >>> solve(400, 2) - 55 - >>> solve(50, 1) + >>> solve(20, 2) + 1 + >>> solve(15, 10) + 0 + >>> solve(16, 2) + 1 + >>> solve(20, 1) Traceback (most recent call last): ... 
ValueError: Invalid input From 71b372f5e2fd313268018df237d401efd7795464 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 14 Oct 2023 09:34:05 -0400 Subject: [PATCH 110/306] Remove doctest in `xgboost_regressor.py` main function (#10422) * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Update xgboost_regressor.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- machine_learning/xgboost_regressor.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/machine_learning/xgboost_regressor.py b/machine_learning/xgboost_regressor.py index a540e3ab0..52e041c55 100644 --- a/machine_learning/xgboost_regressor.py +++ b/machine_learning/xgboost_regressor.py @@ -39,13 +39,13 @@ def xgboost( def main() -> None: """ - >>> main() - Mean Absolute Error : 0.30957163379906033 - Mean Square Error : 0.22611560196662744 - The URL for this algorithm https://xgboost.readthedocs.io/en/stable/ California house price dataset is used to demonstrate the algorithm. 
+ + Expected error values: + Mean Absolute Error: 0.30957163379906033 + Mean Square Error: 0.22611560196662744 """ # Load California house price dataset california = fetch_california_housing() @@ -55,8 +55,8 @@ def main() -> None: ) predictions = xgboost(x_train, y_train, x_test) # Error printing - print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}") - print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}") + print(f"Mean Absolute Error: {mean_absolute_error(y_test, predictions)}") + print(f"Mean Square Error: {mean_squared_error(y_test, predictions)}") if __name__ == "__main__": From 212cdfe36c3599804027c79c26ee814e53a12703 Mon Sep 17 00:00:00 2001 From: Dean Bring Date: Sat, 14 Oct 2023 08:35:12 -0700 Subject: [PATCH 111/306] Added validate sudoku board function (#9881) * Added algorithm to deeply clone a graph * Fixed file name and removed a function call * Removed nested function and fixed class parameter types * Fixed doctests * bug fix * Added class decorator * Updated doctests and fixed precommit errors * Cleaned up code * Simplified doctest * Added doctests * Code simplification * Created function which validates sudoku boards * Update matrix/validate_sudoku_board.py * Fixed precommit errors * Removed file accidentally included * Improved readability and simplicity * Add timeit benchmarks * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update validate_sudoku_board.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- matrix/validate_sudoku_board.py | 107 ++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 matrix/validate_sudoku_board.py diff --git a/matrix/validate_sudoku_board.py b/matrix/validate_sudoku_board.py new file mode 100644 index 000000000..0ee7b3df0 --- /dev/null +++ b/matrix/validate_sudoku_board.py @@ -0,0 +1,107 @@ +""" +LeetCode 
36. Valid Sudoku +https://leetcode.com/problems/valid-sudoku/ +https://en.wikipedia.org/wiki/Sudoku + +Determine if a 9 x 9 Sudoku board is valid. Only the filled cells need to be +validated according to the following rules: + +- Each row must contain the digits 1-9 without repetition. +- Each column must contain the digits 1-9 without repetition. +- Each of the nine 3 x 3 sub-boxes of the grid must contain the digits 1-9 + without repetition. + +Note: + +A Sudoku board (partially filled) could be valid but is not necessarily +solvable. + +Only the filled cells need to be validated according to the mentioned rules. +""" + +from collections import defaultdict + +NUM_SQUARES = 9 +EMPTY_CELL = "." + + +def is_valid_sudoku_board(sudoku_board: list[list[str]]) -> bool: + """ + This function validates (but does not solve) a sudoku board. + The board may be valid but unsolvable. + + >>> is_valid_sudoku_board([ + ... ["5","3",".",".","7",".",".",".","."] + ... ,["6",".",".","1","9","5",".",".","."] + ... ,[".","9","8",".",".",".",".","6","."] + ... ,["8",".",".",".","6",".",".",".","3"] + ... ,["4",".",".","8",".","3",".",".","1"] + ... ,["7",".",".",".","2",".",".",".","6"] + ... ,[".","6",".",".",".",".","2","8","."] + ... ,[".",".",".","4","1","9",".",".","5"] + ... ,[".",".",".",".","8",".",".","7","9"] + ... ]) + True + >>> is_valid_sudoku_board([ + ... ["8","3",".",".","7",".",".",".","."] + ... ,["6",".",".","1","9","5",".",".","."] + ... ,[".","9","8",".",".",".",".","6","."] + ... ,["8",".",".",".","6",".",".",".","3"] + ... ,["4",".",".","8",".","3",".",".","1"] + ... ,["7",".",".",".","2",".",".",".","6"] + ... ,[".","6",".",".",".",".","2","8","."] + ... ,[".",".",".","4","1","9",".",".","5"] + ... ,[".",".",".",".","8",".",".","7","9"] + ... ]) + False + >>> is_valid_sudoku_board([["1", "2", "3", "4", "5", "6", "7", "8", "9"]]) + Traceback (most recent call last): + ... + ValueError: Sudoku boards must be 9x9 squares. + >>> is_valid_sudoku_board( + ... 
[["1"], ["2"], ["3"], ["4"], ["5"], ["6"], ["7"], ["8"], ["9"]] + ... ) + Traceback (most recent call last): + ... + ValueError: Sudoku boards must be 9x9 squares. + """ + if len(sudoku_board) != NUM_SQUARES or ( + any(len(row) != NUM_SQUARES for row in sudoku_board) + ): + error_message = f"Sudoku boards must be {NUM_SQUARES}x{NUM_SQUARES} squares." + raise ValueError(error_message) + + row_values: defaultdict[int, set[str]] = defaultdict(set) + col_values: defaultdict[int, set[str]] = defaultdict(set) + box_values: defaultdict[tuple[int, int], set[str]] = defaultdict(set) + + for row in range(NUM_SQUARES): + for col in range(NUM_SQUARES): + value = sudoku_board[row][col] + + if value == EMPTY_CELL: + continue + + box = (row // 3, col // 3) + + if ( + value in row_values[row] + or value in col_values[col] + or value in box_values[box] + ): + return False + + row_values[row].add(value) + col_values[col].add(value) + box_values[box].add(value) + + return True + + +if __name__ == "__main__": + from doctest import testmod + from timeit import timeit + + testmod() + print(timeit("is_valid_sudoku_board(valid_board)", globals=globals())) + print(timeit("is_valid_sudoku_board(invalid_board)", globals=globals())) From 3ba23384794bc5ce61a300b96d2b721d9d58eccd Mon Sep 17 00:00:00 2001 From: Aakash Giri Date: Sat, 14 Oct 2023 21:47:11 +0530 Subject: [PATCH 112/306] Add Title Case Conversion (#10439) [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci added more test case and type hint [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci updated naming convention --- strings/title.py | 57 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 strings/title.py diff --git a/strings/title.py b/strings/title.py new file mode 100644 index 000000000..1ec2df548 --- /dev/null +++ b/strings/title.py @@ -0,0 +1,57 @@ +def to_title_case(word: str) 
-> str: + """ + Converts a string to capitalized case, preserving the input as is + + >>> to_title_case("Aakash") + 'Aakash' + + >>> to_title_case("aakash") + 'Aakash' + + >>> to_title_case("AAKASH") + 'Aakash' + + >>> to_title_case("aAkAsH") + 'Aakash' + """ + + """ + Convert the first character to uppercase if it's lowercase + """ + if "a" <= word[0] <= "z": + word = chr(ord(word[0]) - 32) + word[1:] + + """ + Convert the remaining characters to lowercase if they are uppercase + """ + for i in range(1, len(word)): + if "A" <= word[i] <= "Z": + word = word[:i] + chr(ord(word[i]) + 32) + word[i + 1 :] + + return word + + +def sentence_to_title_case(input_str: str) -> str: + """ + Converts a string to title case, preserving the input as is + + >>> sentence_to_title_case("Aakash Giri") + 'Aakash Giri' + + >>> sentence_to_title_case("aakash giri") + 'Aakash Giri' + + >>> sentence_to_title_case("AAKASH GIRI") + 'Aakash Giri' + + >>> sentence_to_title_case("aAkAsH gIrI") + 'Aakash Giri' + """ + + return " ".join(to_title_case(word) for word in input_str.split()) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 1969259868451684ab05663cc208f06af20d483f Mon Sep 17 00:00:00 2001 From: Manpreet Singh <63737630+ManpreetSingh2004@users.noreply.github.com> Date: Sat, 14 Oct 2023 23:05:01 +0530 Subject: [PATCH 113/306] Performance: 80% faster Project Euler 145 (#10445) * Performance: 80% faster Project Euler145 * Added timeit benchmark * >>> slow_solution() doctest --- project_euler/problem_145/sol1.py | 70 +++++++++++++++++++++++++++---- 1 file changed, 63 insertions(+), 7 deletions(-) diff --git a/project_euler/problem_145/sol1.py b/project_euler/problem_145/sol1.py index e9fc1a199..71b851178 100644 --- a/project_euler/problem_145/sol1.py +++ b/project_euler/problem_145/sol1.py @@ -17,17 +17,17 @@ EVEN_DIGITS = [0, 2, 4, 6, 8] ODD_DIGITS = [1, 3, 5, 7, 9] -def reversible_numbers( +def slow_reversible_numbers( remaining_length: int, remainder: 
int, digits: list[int], length: int ) -> int: """ Count the number of reversible numbers of given length. Iterate over possible digits considering parity of current sum remainder. - >>> reversible_numbers(1, 0, [0], 1) + >>> slow_reversible_numbers(1, 0, [0], 1) 0 - >>> reversible_numbers(2, 0, [0] * 2, 2) + >>> slow_reversible_numbers(2, 0, [0] * 2, 2) 20 - >>> reversible_numbers(3, 0, [0] * 3, 3) + >>> slow_reversible_numbers(3, 0, [0] * 3, 3) 100 """ if remaining_length == 0: @@ -51,7 +51,7 @@ def reversible_numbers( result = 0 for digit in range(10): digits[length // 2] = digit - result += reversible_numbers( + result += slow_reversible_numbers( 0, (remainder + 2 * digit) // 10, digits, length ) return result @@ -67,7 +67,7 @@ def reversible_numbers( for digit2 in other_parity_digits: digits[(length - remaining_length) // 2] = digit2 - result += reversible_numbers( + result += slow_reversible_numbers( remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, @@ -76,6 +76,42 @@ def reversible_numbers( return result +def slow_solution(max_power: int = 9) -> int: + """ + To evaluate the solution, use solution() + >>> slow_solution(3) + 120 + >>> slow_solution(6) + 18720 + >>> slow_solution(7) + 68720 + """ + result = 0 + for length in range(1, max_power + 1): + result += slow_reversible_numbers(length, 0, [0] * length, length) + return result + + +def reversible_numbers( + remaining_length: int, remainder: int, digits: list[int], length: int +) -> int: + """ + Count the number of reversible numbers of given length. + Iterate over possible digits considering parity of current sum remainder. + >>> reversible_numbers(1, 0, [0], 1) + 0 + >>> reversible_numbers(2, 0, [0] * 2, 2) + 20 + >>> reversible_numbers(3, 0, [0] * 3, 3) + 100 + """ + # There exist no reversible 1, 5, 9, 13 (ie. 
4k+1) digit numbers + if (length - 1) % 4 == 0: + return 0 + + return slow_reversible_numbers(length, 0, [0] * length, length) + + def solution(max_power: int = 9) -> int: """ To evaluate the solution, use solution() @@ -92,5 +128,25 @@ def solution(max_power: int = 9) -> int: return result +def benchmark() -> None: + """ + Benchmarks + """ + # Running performance benchmarks... + # slow_solution : 292.9300301000003 + # solution : 54.90970860000016 + + from timeit import timeit + + print("Running performance benchmarks...") + + print(f"slow_solution : {timeit('slow_solution()', globals=globals(), number=10)}") + print(f"solution : {timeit('solution()', globals=globals(), number=10)}") + + if __name__ == "__main__": - print(f"{solution() = }") + print(f"Solution : {solution()}") + benchmark() + + # for i in range(1, 15): + # print(f"{i}. {reversible_numbers(i, 0, [0]*i, i)}") From f968dda5e9b81bd7dd3c5e9b7a69a9a08ed3ead7 Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Sun, 15 Oct 2023 00:32:37 +0530 Subject: [PATCH 114/306] Updated Comments on upper.py (#10442) * Updated Comments on upper.py * Update upper.py * Update upper.py --------- Co-authored-by: Christian Clauss --- strings/upper.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/strings/upper.py b/strings/upper.py index 5edd40b79..0f68a27b9 100644 --- a/strings/upper.py +++ b/strings/upper.py @@ -1,6 +1,8 @@ def upper(word: str) -> str: """ - Will convert the entire string to uppercase letters + Convert an entire string to ASCII uppercase letters by looking for lowercase ASCII + letters and subtracting 32 from their integer representation to get the uppercase + letter. 
>>> upper("wow") 'WOW' @@ -11,10 +13,6 @@ def upper(word: str) -> str: >>> upper("wh[]32") 'WH[]32' """ - - # Converting to ascii value int value and checking to see if char is a lower letter - # if it is a lowercase letter it is getting shift by 32 which makes it an uppercase - # case letter return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word) From c9ba5e1b6f319e34815660542d8ca0c777c8008a Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 14 Oct 2023 16:08:52 -0400 Subject: [PATCH 115/306] Disable unused dependencies (#10467) Comment out dependencies in requirements.txt that are only used by currently-disabled files --- requirements.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index 25dba6f5a..1e64818bb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,19 +1,19 @@ beautifulsoup4 fake_useragent imageio -keras +keras ; python_version < '3.12' lxml matplotlib numpy opencv-python pandas pillow -projectq +# projectq # uncomment once quantum/quantum_random.py is fixed qiskit ; python_version < '3.12' qiskit-aer ; python_version < '3.12' requests rich -scikit-fuzzy +# scikit-fuzzy # uncomment once fuzzy_logic/fuzzy_operations.py is fixed scikit-learn statsmodels sympy @@ -21,4 +21,4 @@ tensorflow ; python_version < '3.12' texttable tweepy xgboost -yulewalker +# yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed From 3ecad36f92d26676cc73276553cd99763b025b33 Mon Sep 17 00:00:00 2001 From: Manpreet Singh <63737630+ManpreetSingh2004@users.noreply.github.com> Date: Sun, 15 Oct 2023 10:15:44 +0530 Subject: [PATCH 116/306] fix: incorrect range detection in find_missing_number (#10361) * Fix incorrect range detection in find_missing_number * Support consecutive decreasing numbers Added support for consecutive decreasing numbers in the find_missing_number function. 
* Support unordered numbers --- bit_manipulation/missing_number.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py index 92502a778..32b949daa 100644 --- a/bit_manipulation/missing_number.py +++ b/bit_manipulation/missing_number.py @@ -11,11 +11,18 @@ def find_missing_number(nums: list[int]) -> int: Example: >>> find_missing_number([0, 1, 3, 4]) 2 + >>> find_missing_number([1, 3, 4, 5, 6]) + 2 + >>> find_missing_number([6, 5, 4, 2, 1]) + 3 + >>> find_missing_number([6, 1, 5, 3, 4]) + 2 """ - n = len(nums) - missing_number = n + low = min(nums) + high = max(nums) + missing_number = high - for i in range(n): - missing_number ^= i ^ nums[i] + for i in range(low, high): + missing_number ^= i ^ nums[i - low] return missing_number From 7dbc30181826aa26600f8d24c92b1587b31677c6 Mon Sep 17 00:00:00 2001 From: Ravi Kumar <119737193+ravi-ivar-7@users.noreply.github.com> Date: Sun, 15 Oct 2023 14:37:29 +0530 Subject: [PATCH 117/306] added rkf45 method (#10438) * added rkf45 method * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update rkf45.py with suggestions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Improved Code Quality rkf45.py * Added more test cases and exception rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update rkf45.py * corrected some spellings. 
rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update rkf45.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update rkf45.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/rkf45.py | 112 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 maths/rkf45.py diff --git a/maths/rkf45.py b/maths/rkf45.py new file mode 100644 index 000000000..29fd447b6 --- /dev/null +++ b/maths/rkf45.py @@ -0,0 +1,112 @@ +""" +Use the Runge-Kutta-Fehlberg method to solve Ordinary Differential Equations. +""" + +from collections.abc import Callable + +import numpy as np + + +def runge_futta_fehlberg_45( + func: Callable, + x_initial: float, + y_initial: float, + step_size: float, + x_final: float, +) -> np.ndarray: + """ + Solve an Ordinary Differential Equations using Runge-Kutta-Fehlberg Method (rkf45) + of order 5. + + https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta%E2%80%93Fehlberg_method + + args: + func: An ordinary differential equation (ODE) as function of x and y. + x_initial: The initial value of x. + y_initial: The initial value of y. + step_size: The increment value of x. + x_final: The final value of x. + + Returns: + Solution of y at each nodal point + + # exact value of y[1] is tan(0.2) = 0.2027100937470787 + >>> def f(x, y): + ... return 1 + y**2 + >>> y = runge_futta_fehlberg_45(f, 0, 0, 0.2, 1) + >>> y[1] + 0.2027100937470787 + >>> def f(x,y): + ... return x + >>> y = runge_futta_fehlberg_45(f, -1, 0, 0.2, 0) + >>> y[1] + -0.18000000000000002 + >>> y = runge_futta_fehlberg_45(5, 0, 0, 0.1, 1) + Traceback (most recent call last): + ... + TypeError: 'int' object is not callable + >>> def f(x, y): + ... 
return x + y + >>> y = runge_futta_fehlberg_45(f, 0, 0, 0.2, -1) + Traceback (most recent call last): + ... + ValueError: The final value x must be greater than initial value of x. + >>> def f(x, y): + ... return x + >>> y = runge_futta_fehlberg_45(f, -1, 0, -0.2, 0) + Traceback (most recent call last): + ... + ValueError: Step size must be positive. + """ + if x_initial >= x_final: + raise ValueError("The final value x must be greater than initial value of x.") + + if step_size <= 0: + raise ValueError("Step size must be positive.") + + n = int((x_final - x_initial) / step_size) + y = np.zeros( + (n + 1), + ) + x = np.zeros(n + 1) + y[0] = y_initial + x[0] = x_initial + for i in range(n): + k1 = step_size * func(x[i], y[i]) + k2 = step_size * func(x[i] + step_size / 4, y[i] + k1 / 4) + k3 = step_size * func( + x[i] + (3 / 8) * step_size, y[i] + (3 / 32) * k1 + (9 / 32) * k2 + ) + k4 = step_size * func( + x[i] + (12 / 13) * step_size, + y[i] + (1932 / 2197) * k1 - (7200 / 2197) * k2 + (7296 / 2197) * k3, + ) + k5 = step_size * func( + x[i] + step_size, + y[i] + (439 / 216) * k1 - 8 * k2 + (3680 / 513) * k3 - (845 / 4104) * k4, + ) + k6 = step_size * func( + x[i] + step_size / 2, + y[i] + - (8 / 27) * k1 + + 2 * k2 + - (3544 / 2565) * k3 + + (1859 / 4104) * k4 + - (11 / 40) * k5, + ) + y[i + 1] = ( + y[i] + + (16 / 135) * k1 + + (6656 / 12825) * k3 + + (28561 / 56430) * k4 + - (9 / 50) * k5 + + (2 / 55) * k6 + ) + x[i + 1] = step_size + x[i] + return y + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 1ebae5d43e2ce23ef98a0804bf1fa077d2fa5daf Mon Sep 17 00:00:00 2001 From: Manpreet Singh <63737630+ManpreetSingh2004@users.noreply.github.com> Date: Sun, 15 Oct 2023 14:47:22 +0530 Subject: [PATCH 118/306] Performance: 75% faster Project Euler 187 (#10503) * Add comments and wikipedia link in calculate_prime_numbers * Add improved calculate_prime_numbers * Separate slow_solution and new_solution * Use for loops in solution * Separate 
while_solution and new solution * Add performance benchmark * Add doctest for calculate_prime_numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed white space --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- project_euler/problem_187/sol1.py | 118 ++++++++++++++++++++++++++++-- 1 file changed, 111 insertions(+), 7 deletions(-) diff --git a/project_euler/problem_187/sol1.py b/project_euler/problem_187/sol1.py index 12f03e2a7..8944776fe 100644 --- a/project_euler/problem_187/sol1.py +++ b/project_euler/problem_187/sol1.py @@ -14,29 +14,89 @@ not necessarily distinct, prime factors? from math import isqrt -def calculate_prime_numbers(max_number: int) -> list[int]: +def slow_calculate_prime_numbers(max_number: int) -> list[int]: """ - Returns prime numbers below max_number + Returns prime numbers below max_number. + See: https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes - >>> calculate_prime_numbers(10) + >>> slow_calculate_prime_numbers(10) [2, 3, 5, 7] + + >>> slow_calculate_prime_numbers(2) + [] """ + # List containing a bool value for every number below max_number/2 is_prime = [True] * max_number + for i in range(2, isqrt(max_number - 1) + 1): if is_prime[i]: + # Mark all multiple of i as not prime for j in range(i**2, max_number, i): is_prime[j] = False return [i for i in range(2, max_number) if is_prime[i]] -def solution(max_number: int = 10**8) -> int: +def calculate_prime_numbers(max_number: int) -> list[int]: + """ + Returns prime numbers below max_number. 
+ See: https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes + + >>> calculate_prime_numbers(10) + [2, 3, 5, 7] + + >>> calculate_prime_numbers(2) + [] + """ + + if max_number <= 2: + return [] + + # List containing a bool value for every odd number below max_number/2 + is_prime = [True] * (max_number // 2) + + for i in range(3, isqrt(max_number - 1) + 1, 2): + if is_prime[i // 2]: + # Mark all multiple of i as not prime using list slicing + is_prime[i**2 // 2 :: i] = [False] * ( + # Same as: (max_number - (i**2)) // (2 * i) + 1 + # but faster than len(is_prime[i**2 // 2 :: i]) + len(range(i**2 // 2, max_number // 2, i)) + ) + + return [2] + [2 * i + 1 for i in range(1, max_number // 2) if is_prime[i]] + + +def slow_solution(max_number: int = 10**8) -> int: """ Returns the number of composite integers below max_number have precisely two, - not necessarily distinct, prime factors + not necessarily distinct, prime factors. - >>> solution(30) + >>> slow_solution(30) + 10 + """ + + prime_numbers = slow_calculate_prime_numbers(max_number // 2) + + semiprimes_count = 0 + left = 0 + right = len(prime_numbers) - 1 + while left <= right: + while prime_numbers[left] * prime_numbers[right] >= max_number: + right -= 1 + semiprimes_count += right - left + 1 + left += 1 + + return semiprimes_count + + +def while_solution(max_number: int = 10**8) -> int: + """ + Returns the number of composite integers below max_number have precisely two, + not necessarily distinct, prime factors. + + >>> while_solution(30) 10 """ @@ -54,5 +114,49 @@ def solution(max_number: int = 10**8) -> int: return semiprimes_count +def solution(max_number: int = 10**8) -> int: + """ + Returns the number of composite integers below max_number have precisely two, + not necessarily distinct, prime factors. 
+ + >>> solution(30) + 10 + """ + + prime_numbers = calculate_prime_numbers(max_number // 2) + + semiprimes_count = 0 + right = len(prime_numbers) - 1 + for left in range(len(prime_numbers)): + if left > right: + break + for r in range(right, left - 2, -1): + if prime_numbers[left] * prime_numbers[r] < max_number: + break + right = r + semiprimes_count += right - left + 1 + + return semiprimes_count + + +def benchmark() -> None: + """ + Benchmarks + """ + # Running performance benchmarks... + # slow_solution : 108.50874730000032 + # while_sol : 28.09581200000048 + # solution : 25.063097400000515 + + from timeit import timeit + + print("Running performance benchmarks...") + + print(f"slow_solution : {timeit('slow_solution()', globals=globals(), number=10)}") + print(f"while_sol : {timeit('while_solution()', globals=globals(), number=10)}") + print(f"solution : {timeit('solution()', globals=globals(), number=10)}") + + if __name__ == "__main__": - print(f"{solution() = }") + print(f"Solution: {solution()}") + benchmark() From 85cdb93a0d7a306633faa03a134d0d39da7076a8 Mon Sep 17 00:00:00 2001 From: Kosuri L Indu <118645569+kosuri-indu@users.noreply.github.com> Date: Sun, 15 Oct 2023 15:48:28 +0530 Subject: [PATCH 119/306] [Add] : Job Sequence program under GREEDY methods (#10482) * to add job seq program * to add job seq program * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to add definitions in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to add definitions in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * to add definitions in parameters * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changes as recommended * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * type 
hint error resolved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed lambda * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * import stmts order * Update and rename job_sequence.py to job_sequence_with_deadline.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- scheduling/job_sequence_with_deadline.py | 62 ++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 scheduling/job_sequence_with_deadline.py diff --git a/scheduling/job_sequence_with_deadline.py b/scheduling/job_sequence_with_deadline.py new file mode 100644 index 000000000..fccb49cd8 --- /dev/null +++ b/scheduling/job_sequence_with_deadline.py @@ -0,0 +1,62 @@ +""" +Given a list of tasks, each with a deadline and reward, calculate which tasks can be +completed to yield the maximum reward. Each task takes one unit of time to complete, +and we can only work on one task at a time. Once a task has passed its deadline, it +can no longer be scheduled. + +Example : +tasks_info = [(4, 20), (1, 10), (1, 40), (1, 30)] +max_tasks will return (2, [2, 0]) - +Scheduling these tasks would result in a reward of 40 + 20 + +This problem can be solved using the concept of "GREEDY ALGORITHM". +Time Complexity - O(n log n) +https://medium.com/@nihardudhat2000/job-sequencing-with-deadline-17ddbb5890b5 +""" +from dataclasses import dataclass +from operator import attrgetter + + +@dataclass +class Task: + task_id: int + deadline: int + reward: int + + +def max_tasks(tasks_info: list[tuple[int, int]]) -> list[int]: + """ + Create a list of Task objects that are sorted so the highest rewards come first. + Return a list of those task ids that can be completed before i becomes too high. 
+ >>> max_tasks([(4, 20), (1, 10), (1, 40), (1, 30)]) + [2, 0] + >>> max_tasks([(1, 10), (2, 20), (3, 30), (2, 40)]) + [3, 2] + >>> max_tasks([(9, 10)]) + [0] + >>> max_tasks([(-9, 10)]) + [] + >>> max_tasks([]) + [] + >>> max_tasks([(0, 10), (0, 20), (0, 30), (0, 40)]) + [] + >>> max_tasks([(-1, 10), (-2, 20), (-3, 30), (-4, 40)]) + [] + """ + tasks = sorted( + ( + Task(task_id, deadline, reward) + for task_id, (deadline, reward) in enumerate(tasks_info) + ), + key=attrgetter("reward"), + reverse=True, + ) + return [task.task_id for i, task in enumerate(tasks, start=1) if task.deadline >= i] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(f"{max_tasks([(4, 20), (1, 10), (1, 40), (1, 30)]) = }") + print(f"{max_tasks([(1, 10), (2, 20), (3, 30), (2, 40)]) = }") From 777eca813a8030e7a674072c79da144e92dde07a Mon Sep 17 00:00:00 2001 From: Ravi Kumar <119737193+ravi-ivar-7@users.noreply.github.com> Date: Sun, 15 Oct 2023 16:25:56 +0530 Subject: [PATCH 120/306] Corrected typo in function name and doctests. rkf45.py (#10518) * Corrected typo in function name and doctests. rkf45.py There was a mistake in name of function (runge_futta_fehlberg instead of runge_kutta_fehlberg) . I have corrected this in function name and also doctest. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename rkf45.py to runge_kutta_fehlberg_45.py * Update runge_kutta_fehlberg_45.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/{rkf45.py => runge_kutta_fehlberg_45.py} | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) rename maths/{rkf45.py => runge_kutta_fehlberg_45.py} (84%) diff --git a/maths/rkf45.py b/maths/runge_kutta_fehlberg_45.py similarity index 84% rename from maths/rkf45.py rename to maths/runge_kutta_fehlberg_45.py index 29fd447b6..8181fe301 100644 --- a/maths/rkf45.py +++ b/maths/runge_kutta_fehlberg_45.py @@ -7,7 +7,7 @@ from collections.abc import Callable import numpy as np -def runge_futta_fehlberg_45( +def runge_kutta_fehlberg_45( func: Callable, x_initial: float, y_initial: float, @@ -33,33 +33,35 @@ def runge_futta_fehlberg_45( # exact value of y[1] is tan(0.2) = 0.2027100937470787 >>> def f(x, y): ... return 1 + y**2 - >>> y = runge_futta_fehlberg_45(f, 0, 0, 0.2, 1) + >>> y = runge_kutta_fehlberg_45(f, 0, 0, 0.2, 1) >>> y[1] 0.2027100937470787 >>> def f(x,y): ... return x - >>> y = runge_futta_fehlberg_45(f, -1, 0, 0.2, 0) + >>> y = runge_kutta_fehlberg_45(f, -1, 0, 0.2, 0) >>> y[1] -0.18000000000000002 - >>> y = runge_futta_fehlberg_45(5, 0, 0, 0.1, 1) + >>> y = runge_kutta_fehlberg_45(5, 0, 0, 0.1, 1) Traceback (most recent call last): ... TypeError: 'int' object is not callable >>> def f(x, y): ... return x + y - >>> y = runge_futta_fehlberg_45(f, 0, 0, 0.2, -1) + >>> y = runge_kutta_fehlberg_45(f, 0, 0, 0.2, -1) Traceback (most recent call last): ... - ValueError: The final value x must be greater than initial value of x. + ValueError: The final value of x must be greater than initial value of x. >>> def f(x, y): ... 
return x - >>> y = runge_futta_fehlberg_45(f, -1, 0, -0.2, 0) + >>> y = runge_kutta_fehlberg_45(f, -1, 0, -0.2, 0) Traceback (most recent call last): ... ValueError: Step size must be positive. """ if x_initial >= x_final: - raise ValueError("The final value x must be greater than initial value of x.") + raise ValueError( + "The final value of x must be greater than initial value of x." + ) if step_size <= 0: raise ValueError("Step size must be positive.") From 79a91cca956b99acf5e4bd785ff0640c9e591b89 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 15 Oct 2023 16:57:08 +0200 Subject: [PATCH 121/306] Fix typo in filename: ciphers/trifid_cipher.py (#10516) * Update and rename trafid_cipher.py to trifid_cipher.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 14 +++++++++++++- ciphers/{trafid_cipher.py => trifid_cipher.py} | 0 2 files changed, 13 insertions(+), 1 deletion(-) rename ciphers/{trafid_cipher.py => trifid_cipher.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 2c6000c94..ceee9972d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -63,7 +63,9 @@ ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) + * [Imply Gate](boolean_algebra/imply_gate.py) * [Nand Gate](boolean_algebra/nand_gate.py) + * [Nimply Gate](boolean_algebra/nimply_gate.py) * [Nor Gate](boolean_algebra/nor_gate.py) * [Not Gate](boolean_algebra/not_gate.py) * [Or Gate](boolean_algebra/or_gate.py) @@ -119,9 +121,9 @@ * [Shuffled Shift Cipher](ciphers/shuffled_shift_cipher.py) * [Simple Keyword Cypher](ciphers/simple_keyword_cypher.py) * [Simple Substitution Cipher](ciphers/simple_substitution_cipher.py) - * [Trafid Cipher](ciphers/trafid_cipher.py) * [Transposition Cipher](ciphers/transposition_cipher.py) * [Transposition Cipher Encrypt Decrypt File](ciphers/transposition_cipher_encrypt_decrypt_file.py) + * [Trifid Cipher](ciphers/trifid_cipher.py) * [Vigenere 
Cipher](ciphers/vigenere_cipher.py) * [Xor Cipher](ciphers/xor_cipher.py) @@ -174,7 +176,9 @@ ## Data Structures * Arrays * [Equilibrium Index In Array](data_structures/arrays/equilibrium_index_in_array.py) + * [Find Triplets With 0 Sum](data_structures/arrays/find_triplets_with_0_sum.py) * [Median Two Array](data_structures/arrays/median_two_array.py) + * [Pairs With Given Sum](data_structures/arrays/pairs_with_given_sum.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) @@ -385,6 +389,7 @@ ## Financial * [Equated Monthly Installments](financial/equated_monthly_installments.py) + * [Exponential Moving Average](financial/exponential_moving_average.py) * [Interest](financial/interest.py) * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) @@ -670,6 +675,7 @@ * [Radians](maths/radians.py) * [Radix2 Fft](maths/radix2_fft.py) * [Remove Digit](maths/remove_digit.py) + * [Rkf45](maths/rkf45.py) * [Runge Kutta](maths/runge_kutta.py) * [Segmented Sieve](maths/segmented_sieve.py) * Series @@ -688,6 +694,7 @@ * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) + * [Solovay Strassen Primality Test](maths/solovay_strassen_primality_test.py) * [Square Root](maths/square_root.py) * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) * [Sum Of Digits](maths/sum_of_digits.py) @@ -728,6 +735,7 @@ * [Spiral Print](matrix/spiral_print.py) * Tests * [Test Matrix Operation](matrix/tests/test_matrix_operation.py) + * [Validate Sudoku Board](matrix/validate_sudoku_board.py) ## Networking Flow * [Ford Fulkerson](networking_flow/ford_fulkerson.py) @@ -803,6 +811,7 @@ * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) * [Speed Of Sound](physics/speed_of_sound.py) + * [Speeds Of Gas Molecules](physics/speeds_of_gas_molecules.py) ## 
Project Euler * Problem 001 @@ -1106,6 +1115,7 @@ ## Scheduling * [First Come First Served](scheduling/first_come_first_served.py) * [Highest Response Ratio Next](scheduling/highest_response_ratio_next.py) + * [Job Sequence With Deadline](scheduling/job_sequence_with_deadline.py) * [Job Sequencing With Deadline](scheduling/job_sequencing_with_deadline.py) * [Multi Level Feedback Queue](scheduling/multi_level_feedback_queue.py) * [Non Preemptive Shortest Job First](scheduling/non_preemptive_shortest_job_first.py) @@ -1193,6 +1203,7 @@ * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) * [Credit Card Validator](strings/credit_card_validator.py) + * [Damerau Levenshtein Distance](strings/damerau_levenshtein_distance.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) * [Dna](strings/dna.py) * [Frequency Finder](strings/frequency_finder.py) @@ -1225,6 +1236,7 @@ * [String Switch Case](strings/string_switch_case.py) * [Strip](strings/strip.py) * [Text Justification](strings/text_justification.py) + * [Title](strings/title.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) diff --git a/ciphers/trafid_cipher.py b/ciphers/trifid_cipher.py similarity index 100% rename from ciphers/trafid_cipher.py rename to ciphers/trifid_cipher.py From b5474ab68a0e1eea6bbfba445feca39db471c62f Mon Sep 17 00:00:00 2001 From: Rahul Jangra <106389897+leonado10000@users.noreply.github.com> Date: Sun, 15 Oct 2023 20:33:03 +0530 Subject: [PATCH 122/306] [ADD] : maths joint probability distribution (#10508) * Create joint_probability_distribution.py * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update
joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update joint_probability_distribution.py * Update joint_probability_distribution.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maclaurin_series.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Revert changes to maclaurin_series.py * Revert changes to maclaurin_series.py * Update joint_probability_distribution.py * Update joint_probability_distribution.py * Update joint_probability_distribution.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/joint_probability_distribution.py | 124 ++++++++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 
maths/joint_probability_distribution.py diff --git a/maths/joint_probability_distribution.py b/maths/joint_probability_distribution.py new file mode 100644 index 000000000..6fbcea40c --- /dev/null +++ b/maths/joint_probability_distribution.py @@ -0,0 +1,124 @@ +""" +Calculate joint probability distribution +https://en.wikipedia.org/wiki/Joint_probability_distribution +""" + + +def joint_probability_distribution( + x_values: list[int], + y_values: list[int], + x_probabilities: list[float], + y_probabilities: list[float], +) -> dict: + """ + >>> joint_distribution = joint_probability_distribution( + ... [1, 2], [-2, 5, 8], [0.7, 0.3], [0.3, 0.5, 0.2] + ... ) + >>> from math import isclose + >>> isclose(joint_distribution.pop((1, 8)), 0.14) + True + >>> joint_distribution + {(1, -2): 0.21, (1, 5): 0.35, (2, -2): 0.09, (2, 5): 0.15, (2, 8): 0.06} + """ + return { + (x, y): x_prob * y_prob + for x, x_prob in zip(x_values, x_probabilities) + for y, y_prob in zip(y_values, y_probabilities) + } + + +# Function to calculate the expectation (mean) +def expectation(values: list, probabilities: list) -> float: + """ + >>> from math import isclose + >>> isclose(expectation([1, 2], [0.7, 0.3]), 1.3) + True + """ + return sum(x * p for x, p in zip(values, probabilities)) + + +# Function to calculate the variance +def variance(values: list[int], probabilities: list[float]) -> float: + """ + >>> from math import isclose + >>> isclose(variance([1,2],[0.7,0.3]), 0.21) + True + """ + mean = expectation(values, probabilities) + return sum((x - mean) ** 2 * p for x, p in zip(values, probabilities)) + + +# Function to calculate the covariance +def covariance( + x_values: list[int], + y_values: list[int], + x_probabilities: list[float], + y_probabilities: list[float], +) -> float: + """ + >>> covariance([1, 2], [-2, 5, 8], [0.7, 0.3], [0.3, 0.5, 0.2]) + -2.7755575615628914e-17 + """ + mean_x = expectation(x_values, x_probabilities) + mean_y = expectation(y_values, y_probabilities) + 
return sum( + (x - mean_x) * (y - mean_y) * px * py + for x, px in zip(x_values, x_probabilities) + for y, py in zip(y_values, y_probabilities) + ) + + +# Function to calculate the standard deviation +def standard_deviation(variance: float) -> float: + """ + >>> standard_deviation(0.21) + 0.458257569495584 + """ + return variance**0.5 + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + # Input values for X and Y + x_vals = input("Enter values of X separated by spaces: ").split() + y_vals = input("Enter values of Y separated by spaces: ").split() + + # Convert input values to integers + x_values = [int(x) for x in x_vals] + y_values = [int(y) for y in y_vals] + + # Input probabilities for X and Y + x_probs = input("Enter probabilities for X separated by spaces: ").split() + y_probs = input("Enter probabilities for Y separated by spaces: ").split() + assert len(x_values) == len(x_probs) + assert len(y_values) == len(y_probs) + + # Convert input probabilities to floats + x_probabilities = [float(p) for p in x_probs] + y_probabilities = [float(p) for p in y_probs] + + # Calculate the joint probability distribution + jpd = joint_probability_distribution( + x_values, y_values, x_probabilities, y_probabilities + ) + + # Print the joint probability distribution + print( + "\n".join( + f"P(X={x}, Y={y}) = {probability}" for (x, y), probability in jpd.items() + ) + ) + mean_xy = expectation( + [x * y for x in x_values for y in y_values], + [px * py for px in x_probabilities for py in y_probabilities], + ) + print(f"x mean: {expectation(x_values, x_probabilities) = }") + print(f"y mean: {expectation(y_values, y_probabilities) = }") + print(f"xy mean: {mean_xy}") + print(f"x: {variance(x_values, x_probabilities) = }") + print(f"y: {variance(y_values, y_probabilities) = }") + print(f"{covariance(x_values, y_values, x_probabilities, y_probabilities) = }") + print(f"x: {standard_deviation(variance(x_values, x_probabilities)) = }") + print(f"y: 
{standard_deviation(variance(y_values, y_probabilities)) = }") From 755659a62f2c976e1e359a4c0af576b2aa8843a8 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 15 Oct 2023 11:16:56 -0400 Subject: [PATCH 123/306] Omit `project_euler/` from coverage reports (#10469) * Omit project_euler/ and scripts/ from coverage reports * Add scripts/ back into coverage reports --- pyproject.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fe5f2f09c..9c9262d77 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -128,7 +128,10 @@ addopts = [ ] [tool.coverage.report] -omit = [".env/*"] +omit = [ + ".env/*", + "project_euler/*" +] sort = "Cover" [tool.codespell] From 52040a7bf1795e32cbf3863729c010aa55020063 Mon Sep 17 00:00:00 2001 From: Aroson <74296409+Aroson1@users.noreply.github.com> Date: Sun, 15 Oct 2023 21:05:02 +0530 Subject: [PATCH 124/306] Added 555 timer duty cycle and frequency in astable mode. (#10456) * Add files via upload * Update wheatstone_bridge.py * Update wheatstone_bridge.py * Create IC_555_Timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update IC_555_Timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update IC_555_Timer.py * Update and rename IC_555_Timer.py to ic_555_timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ic_555_timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ic_555_timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ic_555_timer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Cleanup ic_555_timer.py --------- Co-authored-by: pre-commit-ci[bot]
<66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/ic_555_timer.py | 75 +++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 electronics/ic_555_timer.py diff --git a/electronics/ic_555_timer.py b/electronics/ic_555_timer.py new file mode 100644 index 000000000..e187e1928 --- /dev/null +++ b/electronics/ic_555_timer.py @@ -0,0 +1,75 @@ +from __future__ import annotations + +""" + Calculate the frequency and/or duty cycle of an astable 555 timer. + * https://en.wikipedia.org/wiki/555_timer_IC#Astable + + These functions take in the value of the external resistances (in ohms) + and capacitance (in Microfarad), and calculates the following: + + ------------------------------------- + | Freq = 1.44 /[( R1+ 2 x R2) x C1] | ... in Hz + ------------------------------------- + where Freq is the frequency, + R1 is the first resistance in ohms, + R2 is the second resistance in ohms, + C1 is the capacitance in Microfarads. + + ------------------------------------------------ + | Duty Cycle = (R1 + R2) / (R1 + 2 x R2) x 100 | ... in % + ------------------------------------------------ + where R1 is the first resistance in ohms, + R2 is the second resistance in ohms. +""" + + +def astable_frequency( + resistance_1: float, resistance_2: float, capacitance: float +) -> float: + """ + Usage examples: + >>> astable_frequency(resistance_1=45, resistance_2=45, capacitance=7) + 1523.8095238095239 + >>> astable_frequency(resistance_1=356, resistance_2=234, capacitance=976) + 1.7905459175553078 + >>> astable_frequency(resistance_1=2, resistance_2=-1, capacitance=2) + Traceback (most recent call last): + ... + ValueError: All values must be positive + >>> astable_frequency(resistance_1=45, resistance_2=45, capacitance=0) + Traceback (most recent call last): + ... 
+ ValueError: All values must be positive + """ + + if resistance_1 <= 0 or resistance_2 <= 0 or capacitance <= 0: + raise ValueError("All values must be positive") + return (1.44 / ((resistance_1 + 2 * resistance_2) * capacitance)) * 10**6 + + +def astable_duty_cycle(resistance_1: float, resistance_2: float) -> float: + """ + Usage examples: + >>> astable_duty_cycle(resistance_1=45, resistance_2=45) + 66.66666666666666 + >>> astable_duty_cycle(resistance_1=356, resistance_2=234) + 71.60194174757282 + >>> astable_duty_cycle(resistance_1=2, resistance_2=-1) + Traceback (most recent call last): + ... + ValueError: All values must be positive + >>> astable_duty_cycle(resistance_1=0, resistance_2=0) + Traceback (most recent call last): + ... + ValueError: All values must be positive + """ + + if resistance_1 <= 0 or resistance_2 <= 0: + raise ValueError("All values must be positive") + return (resistance_1 + resistance_2) / (resistance_1 + 2 * resistance_2) * 100 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From b2636d90b3fe697ff64a62b928edfbeccf216e8a Mon Sep 17 00:00:00 2001 From: K Anamithra Date: Sun, 15 Oct 2023 22:11:29 +0530 Subject: [PATCH 125/306] added implementing stack using two queues (#10076) * added implementing stack using two queues * Update Stack using two queues * Update stack_using_two_queues.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update stack_using_two_queues.py * Update stack_using_two_queues.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update stack_using_two_queues.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update stack_using_two_queues.py * Update stack_using_two_queues.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- 
.../stacks/stack_using_two_queues.py | 85 +++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 data_structures/stacks/stack_using_two_queues.py diff --git a/data_structures/stacks/stack_using_two_queues.py b/data_structures/stacks/stack_using_two_queues.py new file mode 100644 index 000000000..4b73246a0 --- /dev/null +++ b/data_structures/stacks/stack_using_two_queues.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from collections import deque +from dataclasses import dataclass, field + + +@dataclass +class StackWithQueues: + """ + https://www.geeksforgeeks.org/implement-stack-using-queue/ + + >>> stack = StackWithQueues() + >>> stack.push(1) + >>> stack.push(2) + >>> stack.push(3) + >>> stack.peek() + 3 + >>> stack.pop() + 3 + >>> stack.peek() + 2 + >>> stack.pop() + 2 + >>> stack.pop() + 1 + >>> stack.peek() is None + True + >>> stack.pop() + Traceback (most recent call last): + ... + IndexError: pop from an empty deque + """ + + main_queue: deque[int] = field(default_factory=deque) + temp_queue: deque[int] = field(default_factory=deque) + + def push(self, item: int) -> None: + self.temp_queue.append(item) + while self.main_queue: + self.temp_queue.append(self.main_queue.popleft()) + self.main_queue, self.temp_queue = self.temp_queue, self.main_queue + + def pop(self) -> int: + return self.main_queue.popleft() + + def peek(self) -> int | None: + return self.main_queue[0] if self.main_queue else None + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + stack: StackWithQueues | None = StackWithQueues() + while stack: + print("\nChoose operation:") + print("1. Push") + print("2. Pop") + print("3. Peek") + print("4. 
Quit") + + choice = input("Enter choice (1/2/3/4): ") + + if choice == "1": + element = int(input("Enter an integer to push: ").strip()) + stack.push(element) + print(f"{element} pushed onto the stack.") + elif choice == "2": + popped_element = stack.pop() + if popped_element is not None: + print(f"Popped element: {popped_element}") + else: + print("Stack is empty.") + elif choice == "3": + peeked_element = stack.peek() + if peeked_element is not None: + print(f"Top element: {peeked_element}") + else: + print("Stack is empty.") + elif choice == "4": + del stack + stack = None + else: + print("Invalid choice. Please try again.") From 68e6d5ad7e9af8929a22a889b1182706abbfcb50 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 15 Oct 2023 19:11:05 +0200 Subject: [PATCH 126/306] validate_solutions.py: os.getenv('GITHUB_TOKEN', '') (#10546) * validate_solutions.py: os.getenv('GITHUB_TOKEN', '') @tianyizheng02 * updating DIRECTORY.md * f this --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 4 +++- scripts/validate_solutions.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index ceee9972d..6213f26b6 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -373,6 +373,7 @@ * [Electric Conductivity](electronics/electric_conductivity.py) * [Electric Power](electronics/electric_power.py) * [Electrical Impedance](electronics/electrical_impedance.py) + * [Ic 555 Timer](electronics/ic_555_timer.py) * [Ind Reactance](electronics/ind_reactance.py) * [Ohms Law](electronics/ohms_law.py) * [Real And Reactive Power](electronics/real_and_reactive_power.py) @@ -622,6 +623,7 @@ * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) + * [Joint Probability Distribution](maths/joint_probability_distribution.py) * [Juggler Sequence](maths/juggler_sequence.py) * 
[Karatsuba](maths/karatsuba.py) * [Krishnamurthy Number](maths/krishnamurthy_number.py) @@ -675,8 +677,8 @@ * [Radians](maths/radians.py) * [Radix2 Fft](maths/radix2_fft.py) * [Remove Digit](maths/remove_digit.py) - * [Rkf45](maths/rkf45.py) * [Runge Kutta](maths/runge_kutta.py) + * [Runge Kutta Fehlberg 45](maths/runge_kutta_fehlberg_45.py) * [Segmented Sieve](maths/segmented_sieve.py) * Series * [Arithmetic](maths/series/arithmetic.py) diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index ca4af5261..f27ec9ca6 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -55,7 +55,7 @@ def added_solution_file_path() -> list[pathlib.Path]: solution_file_paths = [] headers = { "Accept": "application/vnd.github.v3+json", - "Authorization": "token " + os.environ["GITHUB_TOKEN"], + "Authorization": f"token {os.getenv('GITHUB_TOKEN', '')}", } files = requests.get(get_files_url(), headers=headers).json() for file in files: From 7bdd1cd2beadf494685d1da63fb410343290de98 Mon Sep 17 00:00:00 2001 From: Barun Parua <76466796+Baron105@users.noreply.github.com> Date: Sun, 15 Oct 2023 22:43:40 +0530 Subject: [PATCH 127/306] updated physics/archimedes_principle.py (#10479) * avg and mps speed formulae added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * avg and mps speed formulae added * fixed_spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ws * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see 
https://pre-commit.ci * removed * changed name of file and added code improvements * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * issues fixed due to pi * requested changes added * added some doctests for exception handling, imported g from scipy and allowed zero gravity * removed_scipy_import * Update and rename archimedes_principle.py to archimedes_principle_of_buoyant_force.py * Update archimedes_principle_of_buoyant_force.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- physics/archimedes_principle.py | 49 --------------- .../archimedes_principle_of_buoyant_force.py | 63 +++++++++++++++++++ 2 files changed, 63 insertions(+), 49 deletions(-) delete mode 100644 physics/archimedes_principle.py create mode 100644 physics/archimedes_principle_of_buoyant_force.py diff --git a/physics/archimedes_principle.py b/physics/archimedes_principle.py deleted file mode 100644 index 6ecfc65e7..000000000 --- a/physics/archimedes_principle.py +++ /dev/null @@ -1,49 +0,0 @@ -""" -Calculates buoyant force on object submerged within static fluid. -Discovered by greek mathematician, Archimedes. The principle is named after him. - -Equation for calculating buoyant force: -Fb = ρ * V * g - -Source: -- https://en.wikipedia.org/wiki/Archimedes%27_principle -""" - - -# Acceleration Constant on Earth (unit m/s^2) -g = 9.80665 - - -def archimedes_principle( - fluid_density: float, volume: float, gravity: float = g -) -> float: - """ - Args: - fluid_density: density of fluid (kg/m^3) - volume: volume of object / liquid being displaced by object - gravity: Acceleration from gravity. 
Gravitational force on system, - Default is Earth Gravity - returns: - buoyant force on object in Newtons - - >>> archimedes_principle(fluid_density=997, volume=0.5, gravity=9.8) - 4885.3 - >>> archimedes_principle(fluid_density=997, volume=0.7) - 6844.061035 - """ - - if fluid_density <= 0: - raise ValueError("Impossible fluid density") - if volume < 0: - raise ValueError("Impossible Object volume") - if gravity <= 0: - raise ValueError("Impossible Gravity") - - return fluid_density * gravity * volume - - -if __name__ == "__main__": - import doctest - - # run doctest - doctest.testmod() diff --git a/physics/archimedes_principle_of_buoyant_force.py b/physics/archimedes_principle_of_buoyant_force.py new file mode 100644 index 000000000..5f5698372 --- /dev/null +++ b/physics/archimedes_principle_of_buoyant_force.py @@ -0,0 +1,63 @@ +""" +Calculate the buoyant force of any body completely or partially submerged in a static +fluid. This principle was discovered by the Greek mathematician Archimedes. + +Equation for calculating buoyant force: +Fb = ρ * V * g + +https://en.wikipedia.org/wiki/Archimedes%27_principle +""" + + +# Acceleration Constant on Earth (unit m/s^2) +g = 9.80665 # Also available in scipy.constants.g + + +def archimedes_principle( + fluid_density: float, volume: float, gravity: float = g +) -> float: + """ + Args: + fluid_density: density of fluid (kg/m^3) + volume: volume of object/liquid being displaced by the object (m^3) + gravity: Acceleration from gravity. Gravitational force on the system, + The default is Earth Gravity + returns: + the buoyant force on an object in Newtons + + >>> archimedes_principle(fluid_density=500, volume=4, gravity=9.8) + 19600.0 + >>> archimedes_principle(fluid_density=997, volume=0.5, gravity=9.8) + 4885.3 + >>> archimedes_principle(fluid_density=997, volume=0.7) + 6844.061035 + >>> archimedes_principle(fluid_density=997, volume=-0.7) + Traceback (most recent call last): + ... 
+ ValueError: Impossible object volume + >>> archimedes_principle(fluid_density=0, volume=0.7) + Traceback (most recent call last): + ... + ValueError: Impossible fluid density + >>> archimedes_principle(fluid_density=997, volume=0.7, gravity=0) + 0.0 + >>> archimedes_principle(fluid_density=997, volume=0.7, gravity=-9.8) + Traceback (most recent call last): + ... + ValueError: Impossible gravity + """ + + if fluid_density <= 0: + raise ValueError("Impossible fluid density") + if volume <= 0: + raise ValueError("Impossible object volume") + if gravity < 0: + raise ValueError("Impossible gravity") + + return fluid_density * gravity * volume + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 89d12dfe99d51f7df983ddbc6b0c93e1130fc47b Mon Sep 17 00:00:00 2001 From: Kosuri L Indu <118645569+kosuri-indu@users.noreply.github.com> Date: Mon, 16 Oct 2023 00:57:47 +0530 Subject: [PATCH 128/306] [Add] : Wildcard Matching program under DYNAMIC PROGRAMMING (#10403) * To add wildcard_matching.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changes for doctest errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * code changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/wildcard_matching.py | 62 ++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 dynamic_programming/wildcard_matching.py diff --git a/dynamic_programming/wildcard_matching.py b/dynamic_programming/wildcard_matching.py new file mode 100644 index 000000000..4ffc4b5d4 --- /dev/null +++ b/dynamic_programming/wildcard_matching.py @@ -0,0 +1,62 @@ +""" +Given two strings, an input string and a pattern, +this program checks if the input string matches the pattern. 
+ +Example : +input_string = "baaabab" +pattern = "*****ba*****ab" +Output: True + +This problem can be solved using the concept of "DYNAMIC PROGRAMMING". + +We create a 2D boolean matrix, where each entry match_matrix[i][j] is True +if the first i characters in input_string match the first j characters +of pattern. We initialize the first row and first column based on specific +rules, then fill up the rest of the matrix using a bottom-up dynamic +programming approach. + +The amount of match that will be determined is equal to match_matrix[n][m] +where n and m are lengths of the input_string and pattern respectively. + +""" + + +def is_pattern_match(input_string: str, pattern: str) -> bool: + """ + >>> is_pattern_match('baaabab','*****ba*****ba') + False + >>> is_pattern_match('baaabab','*****ba*****ab') + True + >>> is_pattern_match('aa','*') + True + """ + + input_length = len(input_string) + pattern_length = len(pattern) + + match_matrix = [[False] * (pattern_length + 1) for _ in range(input_length + 1)] + + match_matrix[0][0] = True + + for j in range(1, pattern_length + 1): + if pattern[j - 1] == "*": + match_matrix[0][j] = match_matrix[0][j - 1] + + for i in range(1, input_length + 1): + for j in range(1, pattern_length + 1): + if pattern[j - 1] in ("?", input_string[i - 1]): + match_matrix[i][j] = match_matrix[i - 1][j - 1] + elif pattern[j - 1] == "*": + match_matrix[i][j] = match_matrix[i - 1][j] or match_matrix[i][j - 1] + else: + match_matrix[i][j] = False + + return match_matrix[input_length][pattern_length] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + print(f"{is_pattern_match('baaabab','*****ba*****ab')}") From 4004b862d583a32cb1a809c4ea54d87635a273eb Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 15 Oct 2023 21:40:13 +0200 Subject: [PATCH 129/306] Revert "validate_solutions.py: os.getenv('GITHUB_TOKEN', '')" (#10552) * Revert "validate_solutions.py: os.getenv('GITHUB_TOKEN', '') (#10546)" This reverts commit 
68e6d5ad7e9af8929a22a889b1182706abbfcb50. * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 4 +++- scripts/validate_solutions.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 6213f26b6..5c63e6316 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -265,6 +265,7 @@ * [Postfix Evaluation](data_structures/stacks/postfix_evaluation.py) * [Prefix Evaluation](data_structures/stacks/prefix_evaluation.py) * [Stack](data_structures/stacks/stack.py) + * [Stack Using Two Queues](data_structures/stacks/stack_using_two_queues.py) * [Stack With Doubly Linked List](data_structures/stacks/stack_with_doubly_linked_list.py) * [Stack With Singly Linked List](data_structures/stacks/stack_with_singly_linked_list.py) * [Stock Span Problem](data_structures/stacks/stock_span_problem.py) @@ -361,6 +362,7 @@ * [Trapped Water](dynamic_programming/trapped_water.py) * [Tribonacci](dynamic_programming/tribonacci.py) * [Viterbi](dynamic_programming/viterbi.py) + * [Wildcard Matching](dynamic_programming/wildcard_matching.py) * [Word Break](dynamic_programming/word_break.py) ## Electronics @@ -791,7 +793,7 @@ ## Physics * [Altitude Pressure](physics/altitude_pressure.py) - * [Archimedes Principle](physics/archimedes_principle.py) + * [Archimedes Principle Of Buoyant Force](physics/archimedes_principle_of_buoyant_force.py) * [Basic Orbital Capture](physics/basic_orbital_capture.py) * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) diff --git a/scripts/validate_solutions.py b/scripts/validate_solutions.py index f27ec9ca6..ca4af5261 100755 --- a/scripts/validate_solutions.py +++ b/scripts/validate_solutions.py @@ -55,7 +55,7 @@ def added_solution_file_path() -> list[pathlib.Path]: solution_file_paths = [] headers = { "Accept": "application/vnd.github.v3+json", - "Authorization": f"token {os.getenv('GITHUB_TOKEN', '')}", + 
"Authorization": "token " + os.environ["GITHUB_TOKEN"], } files = requests.get(get_files_url(), headers=headers).json() for file in files: From 902278f656b38ed68e148cf8c9ac2cbd10fcfb7e Mon Sep 17 00:00:00 2001 From: Aasheesh <126905285+AasheeshLikePanner@users.noreply.github.com> Date: Mon, 16 Oct 2023 01:26:02 +0530 Subject: [PATCH 130/306] Changes the code To return the list in dynamic_programming/subset_generation.py (#10191) * Changing the code to return tuple * Changing the code to return tuple * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/subset_generation.py Co-authored-by: Christian Clauss * Adding doctests in subset_generation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update subset_generation.py * Update subset_generation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update subset_generation.py * Update subset_generation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/subset_generation.py Co-authored-by: Christian Clauss * Update stock_span_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update subset_generation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update subset_generation.py * Update subset_generation.py * Update subset_generation.py * Update subset_generation.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- dynamic_programming/subset_generation.py | 90 ++++++++++++++---------- 1 file changed, 53 insertions(+), 37 deletions(-) diff --git a/dynamic_programming/subset_generation.py b/dynamic_programming/subset_generation.py index 
819fd8106..1be412b93 100644 --- a/dynamic_programming/subset_generation.py +++ b/dynamic_programming/subset_generation.py @@ -1,44 +1,60 @@ -# Print all subset combinations of n element in given set of r element. - - -def combination_util(arr, n, r, index, data, i): +def subset_combinations(elements: list[int], n: int) -> list: """ - Current combination is ready to be printed, print it - arr[] ---> Input Array - data[] ---> Temporary array to store current combination - start & end ---> Staring and Ending indexes in arr[] - index ---> Current index in data[] - r ---> Size of a combination to be printed + Compute n-element combinations from a given list using dynamic programming. + Args: + elements: The list of elements from which combinations will be generated. + n: The number of elements in each combination. + Returns: + A list of tuples, each representing a combination of n elements. + >>> subset_combinations(elements=[10, 20, 30, 40], n=2) + [(10, 20), (10, 30), (10, 40), (20, 30), (20, 40), (30, 40)] + >>> subset_combinations(elements=[1, 2, 3], n=1) + [(1,), (2,), (3,)] + >>> subset_combinations(elements=[1, 2, 3], n=3) + [(1, 2, 3)] + >>> subset_combinations(elements=[42], n=1) + [(42,)] + >>> subset_combinations(elements=[6, 7, 8, 9], n=4) + [(6, 7, 8, 9)] + >>> subset_combinations(elements=[10, 20, 30, 40, 50], n=0) + [()] + >>> subset_combinations(elements=[1, 2, 3, 4], n=2) + [(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)] + >>> subset_combinations(elements=[1, 'apple', 3.14], n=2) + [(1, 'apple'), (1, 3.14), ('apple', 3.14)] + >>> subset_combinations(elements=['single'], n=0) + [()] + >>> subset_combinations(elements=[], n=9) + [] + >>> from itertools import combinations + >>> all(subset_combinations(items, n) == list(combinations(items, n)) + ... for items, n in ( + ... ([10, 20, 30, 40], 2), ([1, 2, 3], 1), ([1, 2, 3], 3), ([42], 1), + ... ([6, 7, 8, 9], 4), ([10, 20, 30, 40, 50], 1), ([1, 2, 3, 4], 2), + ... 
([1, 'apple', 3.14], 2), (['single'], 0), ([], 9))) + True """ - if index == r: - for j in range(r): - print(data[j], end=" ") - print(" ") - return - # When no more elements are there to put in data[] - if i >= n: - return - # current is included, put next at next location - data[index] = arr[i] - combination_util(arr, n, r, index + 1, data, i + 1) - # current is excluded, replace it with - # next (Note that i+1 is passed, but - # index is not changed) - combination_util(arr, n, r, index, data, i + 1) - # The main function that prints all combinations - # of size r in arr[] of size n. This function - # mainly uses combinationUtil() + r = len(elements) + if n > r: + return [] + dp: list[list[tuple]] = [[] for _ in range(r + 1)] -def print_combination(arr, n, r): - # A temporary array to store all combination one by one - data = [0] * r - # Print all combination using temporary array 'data[]' - combination_util(arr, n, r, 0, data, 0) + dp[0].append(()) + + for i in range(1, r + 1): + for j in range(i, 0, -1): + for prev_combination in dp[j - 1]: + dp[j].append(tuple(prev_combination) + (elements[i - 1],)) + + try: + return sorted(dp[n]) + except TypeError: + return dp[n] if __name__ == "__main__": - # Driver code to check the function above - arr = [10, 20, 30, 40, 50] - print_combination(arr, len(arr), 3) - # This code is contributed by Ambuj sahu + from doctest import testmod + + testmod() + print(f"{subset_combinations(elements=[10, 20, 30, 40], n=2) = }") From 3d6f3c41881da75653b804d7a5964ea90df9d2ad Mon Sep 17 00:00:00 2001 From: hollowcrust <72879387+hollowcrust@users.noreply.github.com> Date: Mon, 16 Oct 2023 04:13:27 +0800 Subject: [PATCH 131/306] Added data_structures/arrays/sparse_table.py (#10437) * Create sparse_table.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Descriptive names for variables * Fix ruff check error * Update sparse_table.py * Add comments, change variable names * 
[pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix typo * Update sparse_table.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/arrays/sparse_table.py | 94 ++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 data_structures/arrays/sparse_table.py diff --git a/data_structures/arrays/sparse_table.py b/data_structures/arrays/sparse_table.py new file mode 100644 index 000000000..a15d5649e --- /dev/null +++ b/data_structures/arrays/sparse_table.py @@ -0,0 +1,94 @@ +""" + Sparse table is a data structure that allows answering range queries on + a static number list, i.e. the elements do not change throughout all the queries. + + The implementation below will solve the problem of Range Minimum Query: + Finding the minimum value of a subset [L..R] of a static number list. + + Overall time complexity: O(nlogn) + Overall space complexity: O(nlogn) + + Wikipedia link: https://en.wikipedia.org/wiki/Range_minimum_query +""" +from math import log2 + + +def build_sparse_table(number_list: list[int]) -> list[list[int]]: + """ + Precompute range minimum queries with power of two length and store the precomputed + values in a table. + + >>> build_sparse_table([8, 1, 0, 3, 4, 9, 3]) + [[8, 1, 0, 3, 4, 9, 3], [1, 0, 0, 3, 4, 3, 0], [0, 0, 0, 3, 0, 0, 0]] + >>> build_sparse_table([3, 1, 9]) + [[3, 1, 9], [1, 1, 0]] + >>> build_sparse_table([]) + Traceback (most recent call last): + ... + ValueError: empty number list not allowed + """ + if not number_list: + raise ValueError("empty number list not allowed") + + length = len(number_list) + # Initialise sparse_table -- sparse_table[j][i] represents the minimum value of the + # subset of length (2 ** j) of number_list, starting from index i. 
+ + # smallest power of 2 subset length that fully covers number_list + row = int(log2(length)) + 1 + sparse_table = [[0 for i in range(length)] for j in range(row)] + + # minimum of subset of length 1 is that value itself + for i, value in enumerate(number_list): + sparse_table[0][i] = value + j = 1 + + # compute the minimum value for all intervals with size (2 ** j) + while (1 << j) <= length: + i = 0 + # while subset starting from i still have at least (2 ** j) elements + while (i + (1 << j) - 1) < length: + # split range [i, i + 2 ** j] and find minimum of 2 halves + sparse_table[j][i] = min( + sparse_table[j - 1][i + (1 << (j - 1))], sparse_table[j - 1][i] + ) + i += 1 + j += 1 + return sparse_table + + +def query(sparse_table: list[list[int]], left_bound: int, right_bound: int) -> int: + """ + >>> query(build_sparse_table([8, 1, 0, 3, 4, 9, 3]), 0, 4) + 0 + >>> query(build_sparse_table([8, 1, 0, 3, 4, 9, 3]), 4, 6) + 3 + >>> query(build_sparse_table([3, 1, 9]), 2, 2) + 9 + >>> query(build_sparse_table([3, 1, 9]), 0, 1) + 1 + >>> query(build_sparse_table([8, 1, 0, 3, 4, 9, 3]), 0, 11) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> query(build_sparse_table([]), 0, 0) + Traceback (most recent call last): + ... 
+ ValueError: empty number list not allowed + """ + if left_bound < 0 or right_bound >= len(sparse_table[0]): + raise IndexError("list index out of range") + + # highest subset length of power of 2 that is within range [left_bound, right_bound] + j = int(log2(right_bound - left_bound + 1)) + + # minimum of 2 overlapping smaller subsets: + # [left_bound, left_bound + 2 ** j - 1] and [right_bound - 2 ** j + 1, right_bound] + return min(sparse_table[j][right_bound - (1 << j) + 1], sparse_table[j][left_bound]) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + print(f"{query(build_sparse_table([3, 1, 9]), 2, 2) = }") From ec952927baea776bcb0f35d282448d32f3721047 Mon Sep 17 00:00:00 2001 From: dhruvtrigotra <72982592+dhruvtrigotra@users.noreply.github.com> Date: Mon, 16 Oct 2023 02:11:39 +0530 Subject: [PATCH 132/306] charging_inductor (#10427) * charging_capacitor * charging_capacitor * Final edits * charging_inductor --------- Co-authored-by: Christian Clauss --- electronics/charging_inductor.py | 96 ++++++++++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 electronics/charging_inductor.py diff --git a/electronics/charging_inductor.py b/electronics/charging_inductor.py new file mode 100644 index 000000000..e5c0126c2 --- /dev/null +++ b/electronics/charging_inductor.py @@ -0,0 +1,96 @@ +# source - The ARRL Handbook for Radio Communications +# https://en.wikipedia.org/wiki/RL_circuit + +""" +Description +----------- +Inductor is a passive electronic device which stores energy but unlike capacitor, it +stores energy in its 'magnetic field' or 'magnetostatic field'. + +When inductor is connected to 'DC' current source nothing happens it just works like a +wire because it's real effect cannot be seen while 'DC' is connected, its not even +going to store energy. Inductor stores energy only when it is working on 'AC' current. 
+ +Connecting a inductor in series with a resistor(when R = 0) to a 'AC' potential source, +from zero to a finite value causes a sudden voltage to induced in inductor which +opposes the current. which results in initially slowly current rise. However it would +cease if there is no further changes in current. With resistance zero current will never +stop rising. + +'Resistance(ohms) / Inductance(henrys)' is known as RL-timeconstant. It also represents +as τ (tau). While the charging of a inductor with a resistor results in +a exponential function. + +when inductor is connected across 'AC' potential source. It starts to store the energy +in its 'magnetic field'.with the help 'RL-time-constant' we can find current at any time +in inductor while it is charging. +""" +from math import exp # value of exp = 2.718281828459… + + +def charging_inductor( + source_voltage: float, # source_voltage should be in volts. + resistance: float, # resistance should be in ohms. + inductance: float, # inductance should be in henrys. + time: float, # time should in seconds. +) -> float: + """ + Find inductor current at any nth second after initiating its charging. + + Examples + -------- + >>> charging_inductor(source_voltage=5.8,resistance=1.5,inductance=2.3,time=2) + 2.817 + + >>> charging_inductor(source_voltage=8,resistance=5,inductance=3,time=2) + 1.543 + + >>> charging_inductor(source_voltage=8,resistance=5*pow(10,2),inductance=3,time=2) + 0.016 + + >>> charging_inductor(source_voltage=-8,resistance=100,inductance=15,time=12) + Traceback (most recent call last): + ... + ValueError: Source voltage must be positive. + + >>> charging_inductor(source_voltage=80,resistance=-15,inductance=100,time=5) + Traceback (most recent call last): + ... + ValueError: Resistance must be positive. + + >>> charging_inductor(source_voltage=12,resistance=200,inductance=-20,time=5) + Traceback (most recent call last): + ... + ValueError: Inductance must be positive. 
+ + >>> charging_inductor(source_voltage=0,resistance=200,inductance=20,time=5) + Traceback (most recent call last): + ... + ValueError: Source voltage must be positive. + + >>> charging_inductor(source_voltage=10,resistance=0,inductance=20,time=5) + Traceback (most recent call last): + ... + ValueError: Resistance must be positive. + + >>> charging_inductor(source_voltage=15, resistance=25, inductance=0, time=5) + Traceback (most recent call last): + ... + ValueError: Inductance must be positive. + """ + + if source_voltage <= 0: + raise ValueError("Source voltage must be positive.") + if resistance <= 0: + raise ValueError("Resistance must be positive.") + if inductance <= 0: + raise ValueError("Inductance must be positive.") + return round( + source_voltage / resistance * (1 - exp((-time * resistance) / inductance)), 3 + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From bcda3bf64ea20db11cb4b1b81536e2f05ee584fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ab=C3=ADlio=20Azevedo?= Date: Sun, 15 Oct 2023 18:31:11 -0300 Subject: [PATCH 133/306] test: adding more tests to a star algorithm (#10397) * test: adding more tests to a star algorithm * Apply suggestions from code review * Update a_star.py --------- Co-authored-by: Tianyi Zheng --- graphs/a_star.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/graphs/a_star.py b/graphs/a_star.py index e8735179e..06da3b5cd 100644 --- a/graphs/a_star.py +++ b/graphs/a_star.py @@ -16,6 +16,31 @@ def search( cost: int, heuristic: list[list[int]], ) -> tuple[list[list[int]], list[list[int]]]: + """ + Search for a path on a grid avoiding obstacles. + >>> grid = [[0, 1, 0, 0, 0, 0], + ... [0, 1, 0, 0, 0, 0], + ... [0, 1, 0, 0, 0, 0], + ... [0, 1, 0, 0, 1, 0], + ... 
[0, 0, 0, 0, 1, 0]] + >>> init = [0, 0] + >>> goal = [len(grid) - 1, len(grid[0]) - 1] + >>> cost = 1 + >>> heuristic = [[0] * len(grid[0]) for _ in range(len(grid))] + >>> heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] + >>> for i in range(len(grid)): + ... for j in range(len(grid[0])): + ... heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1]) + ... if grid[i][j] == 1: + ... heuristic[i][j] = 99 + >>> path, action = search(grid, init, goal, cost, heuristic) + >>> path # doctest: +NORMALIZE_WHITESPACE + [[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [4, 1], [4, 2], [4, 3], [3, 3], + [2, 3], [2, 4], [2, 5], [3, 5], [4, 5]] + >>> action # doctest: +NORMALIZE_WHITESPACE + [[0, 0, 0, 0, 0, 0], [2, 0, 0, 0, 0, 0], [2, 0, 0, 0, 3, 3], + [2, 0, 0, 0, 0, 2], [2, 3, 3, 3, 0, 2]] + """ closed = [ [0 for col in range(len(grid[0]))] for row in range(len(grid)) ] # the reference grid From d00888de7629b093bcf750ae046318be1e9a1fa3 Mon Sep 17 00:00:00 2001 From: Jeel Gajera <83470656+JeelGajera@users.noreply.github.com> Date: Mon, 16 Oct 2023 03:19:53 +0530 Subject: [PATCH 134/306] feat: adding Apriori Algorithm (#10491) * feat: adding Apriori Algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: doctest, typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: type error, code refactore * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: refactore code * fix: doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: E501, B007 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: err * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: arg typ err * [pre-commit.ci] auto fixes from pre-commit.com hooks for 
more information, see https://pre-commit.ci * fix: typo * fix: typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replace generate_candidates() with itertools.combinations() * mypy * Update apriori_algorithm.py --------- Co-authored-by: Jeel Gajera Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + machine_learning/apriori_algorithm.py | 112 ++++++++++++++++++++++++++ 2 files changed, 113 insertions(+) create mode 100644 machine_learning/apriori_algorithm.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 5c63e6316..55781df03 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -554,6 +554,7 @@ * [Word Frequency Functions](machine_learning/word_frequency_functions.py) * [Xgboost Classifier](machine_learning/xgboost_classifier.py) * [Xgboost Regressor](machine_learning/xgboost_regressor.py) + * [Apriori Algorithm](machine_learning/apriori_algorithm.py) ## Maths * [Abs](maths/abs.py) diff --git a/machine_learning/apriori_algorithm.py b/machine_learning/apriori_algorithm.py new file mode 100644 index 000000000..d9fd1f82e --- /dev/null +++ b/machine_learning/apriori_algorithm.py @@ -0,0 +1,112 @@ +""" +Apriori Algorithm is a Association rule mining technique, also known as market basket +analysis, aims to discover interesting relationships or associations among a set of +items in a transactional or relational database. + +For example, Apriori Algorithm states: "If a customer buys item A and item B, then they +are likely to buy item C." This rule suggests a relationship between items A, B, and C, +indicating that customers who purchased A and B are more likely to also purchase item C. 
+ +WIKI: https://en.wikipedia.org/wiki/Apriori_algorithm +Examples: https://www.kaggle.com/code/earthian/apriori-association-rules-mining +""" +from itertools import combinations + + +def load_data() -> list[list[str]]: + """ + Returns a sample transaction dataset. + + >>> load_data() + [['milk'], ['milk', 'butter'], ['milk', 'bread'], ['milk', 'bread', 'chips']] + """ + return [["milk"], ["milk", "butter"], ["milk", "bread"], ["milk", "bread", "chips"]] + + +def prune(itemset: list, candidates: list, length: int) -> list: + """ + Prune candidate itemsets that are not frequent. + The goal of pruning is to filter out candidate itemsets that are not frequent. This + is done by checking if all the (k-1) subsets of a candidate itemset are present in + the frequent itemsets of the previous iteration (valid subsequences of the frequent + itemsets from the previous iteration). + + Prunes candidate itemsets that are not frequent. + + >>> itemset = ['X', 'Y', 'Z'] + >>> candidates = [['X', 'Y'], ['X', 'Z'], ['Y', 'Z']] + >>> prune(itemset, candidates, 2) + [['X', 'Y'], ['X', 'Z'], ['Y', 'Z']] + + >>> itemset = ['1', '2', '3', '4'] + >>> candidates = ['1', '2', '4'] + >>> prune(itemset, candidates, 3) + [] + """ + pruned = [] + for candidate in candidates: + is_subsequence = True + for item in candidate: + if item not in itemset or itemset.count(item) < length - 1: + is_subsequence = False + break + if is_subsequence: + pruned.append(candidate) + return pruned + + +def apriori(data: list[list[str]], min_support: int) -> list[tuple[list[str], int]]: + """ + Returns a list of frequent itemsets and their support counts. 
+ + >>> data = [['A', 'B', 'C'], ['A', 'B'], ['A', 'C'], ['A', 'D'], ['B', 'C']] + >>> apriori(data, 2) + [(['A', 'B'], 1), (['A', 'C'], 2), (['B', 'C'], 2)] + + >>> data = [['1', '2', '3'], ['1', '2'], ['1', '3'], ['1', '4'], ['2', '3']] + >>> apriori(data, 3) + [] + """ + itemset = [list(transaction) for transaction in data] + frequent_itemsets = [] + length = 1 + + while itemset: + # Count itemset support + counts = [0] * len(itemset) + for transaction in data: + for j, candidate in enumerate(itemset): + if all(item in transaction for item in candidate): + counts[j] += 1 + + # Prune infrequent itemsets + itemset = [item for i, item in enumerate(itemset) if counts[i] >= min_support] + + # Append frequent itemsets (as a list to maintain order) + for i, item in enumerate(itemset): + frequent_itemsets.append((sorted(item), counts[i])) + + length += 1 + itemset = prune(itemset, list(combinations(itemset, length)), length) + + return frequent_itemsets + + +if __name__ == "__main__": + """ + Apriori algorithm for finding frequent itemsets. + + Args: + data: A list of transactions, where each transaction is a list of items. + min_support: The minimum support threshold for frequent itemsets. + + Returns: + A list of frequent itemsets along with their support counts. + """ + import doctest + + doctest.testmod() + + # user-defined threshold or minimum support level + frequent_itemsets = apriori(data=load_data(), min_support=2) + print("\n".join(f"{itemset}: {support}" for itemset, support in frequent_itemsets)) From e6aae1cf66b7e962b886255703b5802d58f27fd3 Mon Sep 17 00:00:00 2001 From: Pooja Sharma <75516191+Shailaputri@users.noreply.github.com> Date: Mon, 16 Oct 2023 05:02:45 +0530 Subject: [PATCH 135/306] Dynamic programming/matrix chain multiplication (#10562) * updating DIRECTORY.md * spell changes * updating DIRECTORY.md * real world applications * updating DIRECTORY.md * Update matrix_chain_multiplication.py Add a non-dp solution with benchmarks. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update matrix_chain_multiplication.py * Update matrix_chain_multiplication.py * Update matrix_chain_multiplication.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Pooja Sharma Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 5 +- .../matrix_chain_multiplication.py | 143 ++++++++++++++++++ 2 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 dynamic_programming/matrix_chain_multiplication.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 55781df03..cef1e06b7 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -182,6 +182,7 @@ * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) + * [Sparse Table](data_structures/arrays/sparse_table.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) @@ -340,6 +341,7 @@ * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) * [Longest Palindromic Subsequence](dynamic_programming/longest_palindromic_subsequence.py) * [Longest Sub Array](dynamic_programming/longest_sub_array.py) + * [Matrix Chain Multiplication](dynamic_programming/matrix_chain_multiplication.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) * [Max Product Subarray](dynamic_programming/max_product_subarray.py) @@ -370,6 +372,7 @@ * [Builtin Voltage](electronics/builtin_voltage.py) * [Carrier Concentration](electronics/carrier_concentration.py) * [Charging Capacitor](electronics/charging_capacitor.py) + * [Charging Inductor](electronics/charging_inductor.py) * [Circular 
Convolution](electronics/circular_convolution.py) * [Coulombs Law](electronics/coulombs_law.py) * [Electric Conductivity](electronics/electric_conductivity.py) @@ -524,6 +527,7 @@ * [Simplex](linear_programming/simplex.py) ## Machine Learning + * [Apriori Algorithm](machine_learning/apriori_algorithm.py) * [Astar](machine_learning/astar.py) * [Data Transformations](machine_learning/data_transformations.py) * [Decision Tree](machine_learning/decision_tree.py) @@ -554,7 +558,6 @@ * [Word Frequency Functions](machine_learning/word_frequency_functions.py) * [Xgboost Classifier](machine_learning/xgboost_classifier.py) * [Xgboost Regressor](machine_learning/xgboost_regressor.py) - * [Apriori Algorithm](machine_learning/apriori_algorithm.py) ## Maths * [Abs](maths/abs.py) diff --git a/dynamic_programming/matrix_chain_multiplication.py b/dynamic_programming/matrix_chain_multiplication.py new file mode 100644 index 000000000..084254a61 --- /dev/null +++ b/dynamic_programming/matrix_chain_multiplication.py @@ -0,0 +1,143 @@ +""" +Find the minimum number of multiplications needed to multiply chain of matrices. +Reference: https://www.geeksforgeeks.org/matrix-chain-multiplication-dp-8/ + +The algorithm has interesting real-world applications. Example: +1. Image transformations in Computer Graphics as images are composed of matrix. +2. Solve complex polynomial equations in the field of algebra using least processing + power. +3. Calculate overall impact of macroeconomic decisions as economic equations involve a + number of variables. +4. Self-driving car navigation can be made more accurate as matrix multiplication can + accurately determine position and orientation of obstacles in short time. + +Python doctests can be run with the following command: +python -m doctest -v matrix_chain_multiply.py + +Given a sequence arr[] that represents chain of 2D matrices such that the dimension of +the ith matrix is arr[i-1]*arr[i]. 
+So suppose arr = [40, 20, 30, 10, 30] means we have 4 matrices of dimensions +40*20, 20*30, 30*10 and 10*30. + +matrix_chain_multiply() returns an integer denoting minimum number of multiplications to +multiply the chain. + +We do not need to perform actual multiplication here. +We only need to decide the order in which to perform the multiplication. + +Hints: +1. Number of multiplications (ie cost) to multiply 2 matrices +of size m*p and p*n is m*p*n. +2. Cost of matrix multiplication is associative ie (M1*M2)*M3 != M1*(M2*M3) +3. Matrix multiplication is not commutative. So, M1*M2 does not mean M2*M1 can be done. +4. To determine the required order, we can try different combinations. +So, this problem has overlapping sub-problems and can be solved using recursion. +We use Dynamic Programming for optimal time complexity. + +Example input: +arr = [40, 20, 30, 10, 30] +output: 26000 +""" +from collections.abc import Iterator +from contextlib import contextmanager +from functools import cache +from sys import maxsize + + +def matrix_chain_multiply(arr: list[int]) -> int: + """ + Find the minimum number of multiplcations required to multiply the chain of matrices + + Args: + arr: The input array of integers. + + Returns: + Minimum number of multiplications needed to multiply the chain + + Examples: + >>> matrix_chain_multiply([1, 2, 3, 4, 3]) + 30 + >>> matrix_chain_multiply([10]) + 0 + >>> matrix_chain_multiply([10, 20]) + 0 + >>> matrix_chain_multiply([19, 2, 19]) + 722 + >>> matrix_chain_multiply(list(range(1, 100))) + 323398 + + # >>> matrix_chain_multiply(list(range(1, 251))) + # 2626798 + """ + if len(arr) < 2: + return 0 + # initialising 2D dp matrix + n = len(arr) + dp = [[maxsize for j in range(n)] for i in range(n)] + # we want minimum cost of multiplication of matrices + # of dimension (i*k) and (k*j). This cost is arr[i-1]*arr[k]*arr[j]. 
+ for i in range(n - 1, 0, -1): + for j in range(i, n): + if i == j: + dp[i][j] = 0 + continue + for k in range(i, j): + dp[i][j] = min( + dp[i][j], dp[i][k] + dp[k + 1][j] + arr[i - 1] * arr[k] * arr[j] + ) + + return dp[1][n - 1] + + +def matrix_chain_order(dims: list[int]) -> int: + """ + Source: https://en.wikipedia.org/wiki/Matrix_chain_multiplication + The dynamic programming solution is faster than cached the recursive solution and + can handle larger inputs. + >>> matrix_chain_order([1, 2, 3, 4, 3]) + 30 + >>> matrix_chain_order([10]) + 0 + >>> matrix_chain_order([10, 20]) + 0 + >>> matrix_chain_order([19, 2, 19]) + 722 + >>> matrix_chain_order(list(range(1, 100))) + 323398 + + # >>> matrix_chain_order(list(range(1, 251))) # Max before RecursionError is raised + # 2626798 + """ + + @cache + def a(i: int, j: int) -> int: + return min( + (a(i, k) + dims[i] * dims[k] * dims[j] + a(k, j) for k in range(i + 1, j)), + default=0, + ) + + return a(0, len(dims) - 1) + + +@contextmanager +def elapsed_time(msg: str) -> Iterator: + # print(f"Starting: {msg}") + from time import perf_counter_ns + + start = perf_counter_ns() + yield + print(f"Finished: {msg} in {(perf_counter_ns() - start) / 10 ** 9} seconds.") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + with elapsed_time("matrix_chain_order"): + print(f"{matrix_chain_order(list(range(1, 251))) = }") + with elapsed_time("matrix_chain_multiply"): + print(f"{matrix_chain_multiply(list(range(1, 251))) = }") + with elapsed_time("matrix_chain_order"): + print(f"{matrix_chain_order(list(range(1, 251))) = }") + with elapsed_time("matrix_chain_multiply"): + print(f"{matrix_chain_multiply(list(range(1, 251))) = }") From b6b45eb1cee564e3c563966244f124051c28b8e7 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 15 Oct 2023 19:41:45 -0400 Subject: [PATCH 136/306] Fix numpy deprecation warning in `2_hidden_layers_neural_network.py` (#10424) * updating DIRECTORY.md * updating DIRECTORY.md * updating 
DIRECTORY.md * updating DIRECTORY.md * Fix deprecation warning in 2_hidden_layers_neural_network.py Fix numpy deprecation warning: DeprecationWarning: Conversion of an array with ndim > 0 to a scalar is deprecated, and will error in future. Ensure you extract a single element from your array before performing this operation. (Deprecated NumPy 1.25.) --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- neural_network/2_hidden_layers_neural_network.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_network/2_hidden_layers_neural_network.py b/neural_network/2_hidden_layers_neural_network.py index 9c5772326..7b374a93d 100644 --- a/neural_network/2_hidden_layers_neural_network.py +++ b/neural_network/2_hidden_layers_neural_network.py @@ -196,7 +196,7 @@ class TwoHiddenLayerNeuralNetwork: >>> output_val = numpy.array(([0], [1], [1]), dtype=float) >>> nn = TwoHiddenLayerNeuralNetwork(input_val, output_val) >>> nn.train(output_val, 1000, False) - >>> nn.predict([0,1,0]) in (0, 1) + >>> nn.predict([0, 1, 0]) in (0, 1) True """ @@ -221,7 +221,7 @@ class TwoHiddenLayerNeuralNetwork: ) ) - return int(self.layer_between_second_hidden_layer_and_output > 0.6) + return int((self.layer_between_second_hidden_layer_and_output > 0.6)[0]) def sigmoid(value: numpy.ndarray) -> numpy.ndarray: From 73ebf7bdb12f4bced39f25766ac4d2cd9b6ab525 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 15 Oct 2023 19:42:55 -0400 Subject: [PATCH 137/306] Move and rename `maths/greedy_coin_change.py` (#10418) * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Move greedy_coin_change.py to greedy_methods/ and rename file --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .../minimum_coin_change.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename maths/greedy_coin_change.py => greedy_methods/minimum_coin_change.py (100%) diff --git 
a/maths/greedy_coin_change.py b/greedy_methods/minimum_coin_change.py similarity index 100% rename from maths/greedy_coin_change.py rename to greedy_methods/minimum_coin_change.py From c2f14e8a78c1700a4101746a1a6e3d70be50aa07 Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:44:06 +1300 Subject: [PATCH 138/306] Add note to feature_request.yml about not opening issues for new algorithms (#10142) --- .github/ISSUE_TEMPLATE/feature_request.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 09a159b21..20823bd58 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -6,6 +6,7 @@ body: attributes: value: > Before requesting please search [existing issues](https://github.com/TheAlgorithms/Python/labels/enhancement). + Do not create issues to implement new algorithms as these will be closed. Usage questions such as "How do I...?" belong on the [Discord](https://discord.gg/c7MnfGFGa6) and will be closed. @@ -13,7 +14,6 @@ body: attributes: label: "Feature description" description: > - This could be new algorithms, data structures or improving any existing - implementations. + This could include new topics or improving any existing implementations. 
validations: required: true From bb8f194957c4308cbb0bf16a4e07acbe34d2087e Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 15 Oct 2023 20:01:01 -0400 Subject: [PATCH 139/306] Delete `texttable` from dependencies (#10565) * Disable unused dependencies Comment out dependencies in requirements.txt that are only used by currently-disabled files * Delete unused dependency --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1e64818bb..05d9f1e8c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -18,7 +18,6 @@ scikit-learn statsmodels sympy tensorflow ; python_version < '3.12' -texttable tweepy xgboost # yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed From 1a26d76c60422030cf0c57c62623866d3f3229f2 Mon Sep 17 00:00:00 2001 From: "Gabrielly de S. Pinto Dantas" Date: Sun, 15 Oct 2023 21:44:10 -0300 Subject: [PATCH 140/306] add tests for tree_sort (#10015) * add tests for tree_sort * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update tree_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/tree_sort.py | 93 +++++++++++++++++++++++++++------------------- 1 file changed, 55 insertions(+), 38 deletions(-) diff --git a/sorts/tree_sort.py b/sorts/tree_sort.py index 78c3e893e..e63a3253b 100644 --- a/sorts/tree_sort.py +++ b/sorts/tree_sort.py @@ -1,53 +1,70 @@ """ Tree_sort algorithm. -Build a BST and in order traverse. +Build a Binary Search Tree and then iterate thru it to get a sorted list. 
""" +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass +@dataclass class Node: - # BST data structure - def __init__(self, val): - self.val = val - self.left = None - self.right = None + val: int + left: Node | None = None + right: Node | None = None - def insert(self, val): - if self.val: - if val < self.val: - if self.left is None: - self.left = Node(val) - else: - self.left.insert(val) - elif val > self.val: - if self.right is None: - self.right = Node(val) - else: - self.right.insert(val) - else: - self.val = val + def __iter__(self) -> Iterator[int]: + if self.left: + yield from self.left + yield self.val + if self.right: + yield from self.right + + def __len__(self) -> int: + return sum(1 for _ in self) + + def insert(self, val: int) -> None: + if val < self.val: + if self.left is None: + self.left = Node(val) + else: + self.left.insert(val) + elif val > self.val: + if self.right is None: + self.right = Node(val) + else: + self.right.insert(val) -def inorder(root, res): - # Recursive traversal - if root: - inorder(root.left, res) - res.append(root.val) - inorder(root.right, res) - - -def tree_sort(arr): - # Build BST +def tree_sort(arr: list[int]) -> tuple[int, ...]: + """ + >>> tree_sort([]) + () + >>> tree_sort((1,)) + (1,) + >>> tree_sort((1, 2)) + (1, 2) + >>> tree_sort([5, 2, 7]) + (2, 5, 7) + >>> tree_sort((5, -4, 9, 2, 7)) + (-4, 2, 5, 7, 9) + >>> tree_sort([5, 6, 1, -1, 4, 37, 2, 7]) + (-1, 1, 2, 4, 5, 6, 7, 37) + >>> tree_sort(range(10, -10, -1)) == tuple(sorted(range(10, -10, -1))) + True + """ if len(arr) == 0: - return arr + return tuple(arr) root = Node(arr[0]) - for i in range(1, len(arr)): - root.insert(arr[i]) - # Traverse BST in order. 
- res = [] - inorder(root, res) - return res + for item in arr[1:]: + root.insert(item) + return tuple(root) if __name__ == "__main__": - print(tree_sort([10, 1, 3, 2, 9, 14, 13])) + import doctest + + doctest.testmod() + print(f"{tree_sort([5, 6, 1, -1, 4, 37, -3, 7]) = }") From cc0405d05cb4c5009e8bf826e3f641c427ba70d5 Mon Sep 17 00:00:00 2001 From: Yousha Mahamuni <40205524+yousha806@users.noreply.github.com> Date: Mon, 16 Oct 2023 08:17:27 +0530 Subject: [PATCH 141/306] Update volume.py with volume of Icosahedron (#9628) * Update volume.py with volume of Icosahedron Added function to find volume of a regular Icosahedron * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update volume.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/volume.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/maths/volume.py b/maths/volume.py index 721974e68..b4df4e475 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -469,6 +469,35 @@ def vol_torus(torus_radius: float, tube_radius: float) -> float: return 2 * pow(pi, 2) * torus_radius * pow(tube_radius, 2) +def vol_icosahedron(tri_side: float) -> float: + """Calculate the Volume of an Icosahedron. + Wikipedia reference: https://en.wikipedia.org/wiki/Regular_icosahedron + + >>> from math import isclose + >>> isclose(vol_icosahedron(2.5), 34.088984228514256) + True + >>> isclose(vol_icosahedron(10), 2181.694990624912374) + True + >>> isclose(vol_icosahedron(5), 272.711873828114047) + True + >>> isclose(vol_icosahedron(3.49), 92.740688412033628) + True + >>> vol_icosahedron(0) + 0.0 + >>> vol_icosahedron(-1) + Traceback (most recent call last): + ... + ValueError: vol_icosahedron() only accepts non-negative values + >>> vol_icosahedron(-0.2) + Traceback (most recent call last): + ... 
+ ValueError: vol_icosahedron() only accepts non-negative values + """ + if tri_side < 0: + raise ValueError("vol_icosahedron() only accepts non-negative values") + return tri_side**3 * (3 + 5**0.5) * 5 / 12 + + def main(): """Print the Results of Various Volume Calculations.""" print("Volumes:") @@ -489,6 +518,7 @@ def main(): print( f"Hollow Circular Cylinder: {vol_hollow_circular_cylinder(1, 2, 3) = }" ) # ~= 28.3 + print(f"Icosahedron: {vol_icosahedron(2.5) = }") # ~=34.09 if __name__ == "__main__": From f4ff73b1bdaa4349315beaf44e093c59f6c87fd3 Mon Sep 17 00:00:00 2001 From: Akshar Goyal Date: Mon, 16 Oct 2023 03:21:43 -0400 Subject: [PATCH 142/306] Converted tests into doctests (#10572) * Converted tests into doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed commented code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- boolean_algebra/and_gate.py | 18 +++--------------- boolean_algebra/imply_gate.py | 7 +++---- boolean_algebra/nand_gate.py | 17 +++-------------- boolean_algebra/nimply_gate.py | 7 +++---- boolean_algebra/not_gate.py | 13 +++---------- boolean_algebra/or_gate.py | 17 +++-------------- boolean_algebra/xnor_gate.py | 17 +++-------------- boolean_algebra/xor_gate.py | 15 +++------------ 8 files changed, 24 insertions(+), 87 deletions(-) diff --git a/boolean_algebra/and_gate.py b/boolean_algebra/and_gate.py index 834116772..f0fd45c9f 100644 --- a/boolean_algebra/and_gate.py +++ b/boolean_algebra/and_gate.py @@ -32,19 +32,7 @@ def and_gate(input_1: int, input_2: int) -> int: return int((input_1, input_2).count(0) == 0) -def test_and_gate() -> None: - """ - Tests the and_gate function - """ - assert and_gate(0, 0) == 0 - assert and_gate(0, 1) == 0 - assert and_gate(1, 0) == 0 - assert and_gate(1, 1) == 1 - - if __name__ == 
"__main__": - test_and_gate() - print(and_gate(1, 0)) - print(and_gate(0, 0)) - print(and_gate(0, 1)) - print(and_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/imply_gate.py b/boolean_algebra/imply_gate.py index 151a7ad64..b64ebaceb 100644 --- a/boolean_algebra/imply_gate.py +++ b/boolean_algebra/imply_gate.py @@ -34,7 +34,6 @@ def imply_gate(input_1: int, input_2: int) -> int: if __name__ == "__main__": - print(imply_gate(0, 0)) - print(imply_gate(0, 1)) - print(imply_gate(1, 0)) - print(imply_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/nand_gate.py b/boolean_algebra/nand_gate.py index ea3303d16..80f9d12db 100644 --- a/boolean_algebra/nand_gate.py +++ b/boolean_algebra/nand_gate.py @@ -30,18 +30,7 @@ def nand_gate(input_1: int, input_2: int) -> int: return int((input_1, input_2).count(0) != 0) -def test_nand_gate() -> None: - """ - Tests the nand_gate function - """ - assert nand_gate(0, 0) == 1 - assert nand_gate(0, 1) == 1 - assert nand_gate(1, 0) == 1 - assert nand_gate(1, 1) == 0 - - if __name__ == "__main__": - print(nand_gate(0, 0)) - print(nand_gate(0, 1)) - print(nand_gate(1, 0)) - print(nand_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/nimply_gate.py b/boolean_algebra/nimply_gate.py index 6e34332d9..68e82c8db 100644 --- a/boolean_algebra/nimply_gate.py +++ b/boolean_algebra/nimply_gate.py @@ -34,7 +34,6 @@ def nimply_gate(input_1: int, input_2: int) -> int: if __name__ == "__main__": - print(nimply_gate(0, 0)) - print(nimply_gate(0, 1)) - print(nimply_gate(1, 0)) - print(nimply_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/not_gate.py b/boolean_algebra/not_gate.py index eb85e9e44..cfa74cf42 100644 --- a/boolean_algebra/not_gate.py +++ b/boolean_algebra/not_gate.py @@ -24,14 +24,7 @@ def not_gate(input_1: int) -> int: return 1 if input_1 == 0 else 0 -def test_not_gate() -> None: - """ - Tests the not_gate function - """ 
- assert not_gate(0) == 1 - assert not_gate(1) == 0 - - if __name__ == "__main__": - print(not_gate(0)) - print(not_gate(1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/or_gate.py b/boolean_algebra/or_gate.py index aa7e6645e..0fd4e5a5d 100644 --- a/boolean_algebra/or_gate.py +++ b/boolean_algebra/or_gate.py @@ -29,18 +29,7 @@ def or_gate(input_1: int, input_2: int) -> int: return int((input_1, input_2).count(1) != 0) -def test_or_gate() -> None: - """ - Tests the or_gate function - """ - assert or_gate(0, 0) == 0 - assert or_gate(0, 1) == 1 - assert or_gate(1, 0) == 1 - assert or_gate(1, 1) == 1 - - if __name__ == "__main__": - print(or_gate(0, 1)) - print(or_gate(1, 0)) - print(or_gate(0, 0)) - print(or_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/xnor_gate.py b/boolean_algebra/xnor_gate.py index 45ab2700e..05b756da2 100644 --- a/boolean_algebra/xnor_gate.py +++ b/boolean_algebra/xnor_gate.py @@ -31,18 +31,7 @@ def xnor_gate(input_1: int, input_2: int) -> int: return 1 if input_1 == input_2 else 0 -def test_xnor_gate() -> None: - """ - Tests the xnor_gate function - """ - assert xnor_gate(0, 0) == 1 - assert xnor_gate(0, 1) == 0 - assert xnor_gate(1, 0) == 0 - assert xnor_gate(1, 1) == 1 - - if __name__ == "__main__": - print(xnor_gate(0, 0)) - print(xnor_gate(0, 1)) - print(xnor_gate(1, 0)) - print(xnor_gate(1, 1)) + import doctest + + doctest.testmod() diff --git a/boolean_algebra/xor_gate.py b/boolean_algebra/xor_gate.py index db4f5b45c..f3922e426 100644 --- a/boolean_algebra/xor_gate.py +++ b/boolean_algebra/xor_gate.py @@ -31,16 +31,7 @@ def xor_gate(input_1: int, input_2: int) -> int: return (input_1, input_2).count(0) % 2 -def test_xor_gate() -> None: - """ - Tests the xor_gate function - """ - assert xor_gate(0, 0) == 0 - assert xor_gate(0, 1) == 1 - assert xor_gate(1, 0) == 1 - assert xor_gate(1, 1) == 0 - - if __name__ == "__main__": - print(xor_gate(0, 0)) - print(xor_gate(0, 1)) + import 
doctest + + doctest.testmod() From 3c14e6ae3aa6506ca8e5baa73321f3a04caf83d0 Mon Sep 17 00:00:00 2001 From: Kamil <32775019+quant12345@users.noreply.github.com> Date: Mon, 16 Oct 2023 12:29:46 +0500 Subject: [PATCH 143/306] Refactoring and optimization of the lu_decomposition algorithm (#9231) * Replacing the generator with numpy vector operations from lu_decomposition. * Revert "Replacing the generator with numpy vector operations from lu_decomposition." This reverts commit ad217c66165898d62b76cc89ba09c2d7049b6448. * Replacing the generator with numpy vector operations from lu_decomposition. --- arithmetic_analysis/lu_decomposition.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index eaabce544..094b20abf 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -88,15 +88,19 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray lower = np.zeros((rows, columns)) upper = np.zeros((rows, columns)) + + # in 'total', the necessary data is extracted through slices + # and the sum of the products is obtained. 
+ for i in range(columns): for j in range(i): - total = sum(lower[i][k] * upper[k][j] for k in range(j)) + total = np.sum(lower[i, :i] * upper[:i, j]) if upper[j][j] == 0: raise ArithmeticError("No LU decomposition exists") lower[i][j] = (table[i][j] - total) / upper[j][j] lower[i][i] = 1 for j in range(i, columns): - total = sum(lower[i][k] * upper[k][j] for k in range(j)) + total = np.sum(lower[i, :i] * upper[:i, j]) upper[i][j] = table[i][j] - total return lower, upper From e9b3f20cec28b492b2e22e68ea61ec75ce3b9df8 Mon Sep 17 00:00:00 2001 From: hollowcrust <72879387+hollowcrust@users.noreply.github.com> Date: Mon, 16 Oct 2023 16:03:16 +0800 Subject: [PATCH 144/306] Delete dynamic_programming/longest_sub_array.py (#10073) --- dynamic_programming/longest_sub_array.py | 33 ------------------------ 1 file changed, 33 deletions(-) delete mode 100644 dynamic_programming/longest_sub_array.py diff --git a/dynamic_programming/longest_sub_array.py b/dynamic_programming/longest_sub_array.py deleted file mode 100644 index b477acf61..000000000 --- a/dynamic_programming/longest_sub_array.py +++ /dev/null @@ -1,33 +0,0 @@ -""" -Author : Yvonne - -This is a pure Python implementation of Dynamic Programming solution to the - longest_sub_array problem. - -The problem is : -Given an array, to find the longest and continuous sub array and get the max sum of the - sub array in the given array. 
-""" - - -class SubArray: - def __init__(self, arr): - # we need a list not a string, so do something to change the type - self.array = arr.split(",") - - def solve_sub_array(self): - rear = [int(self.array[0])] * len(self.array) - sum_value = [int(self.array[0])] * len(self.array) - for i in range(1, len(self.array)): - sum_value[i] = max( - int(self.array[i]) + sum_value[i - 1], int(self.array[i]) - ) - rear[i] = max(sum_value[i], rear[i - 1]) - return rear[len(self.array) - 1] - - -if __name__ == "__main__": - whole_array = input("please input some numbers:") - array = SubArray(whole_array) - re = array.solve_sub_array() - print(("the results is:", re)) From 96f81770d7e047f24c3203e913bf346754936330 Mon Sep 17 00:00:00 2001 From: Praful Katare <47990928+Kpraful@users.noreply.github.com> Date: Mon, 16 Oct 2023 13:43:34 +0530 Subject: [PATCH 145/306] Adds Doc test in depth_first_search_2.py (#10094) * Adds Doc test in depth_first_search_2.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes depth_first_search_2.py formatting * Cleanup --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- graphs/depth_first_search_2.py | 80 ++++++++++++++++++++++++++++++---- 1 file changed, 71 insertions(+), 9 deletions(-) diff --git a/graphs/depth_first_search_2.py b/graphs/depth_first_search_2.py index 3072d527c..5ff13af33 100644 --- a/graphs/depth_first_search_2.py +++ b/graphs/depth_first_search_2.py @@ -9,12 +9,44 @@ class Graph: # for printing the Graph vertices def print_graph(self) -> None: + """ + Print the graph vertices. 
+ + Example: + >>> g = Graph() + >>> g.add_edge(0, 1) + >>> g.add_edge(0, 2) + >>> g.add_edge(1, 2) + >>> g.add_edge(2, 0) + >>> g.add_edge(2, 3) + >>> g.add_edge(3, 3) + >>> g.print_graph() + {0: [1, 2], 1: [2], 2: [0, 3], 3: [3]} + 0 -> 1 -> 2 + 1 -> 2 + 2 -> 0 -> 3 + 3 -> 3 + """ print(self.vertex) for i in self.vertex: print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]])) # for adding the edge between two vertices def add_edge(self, from_vertex: int, to_vertex: int) -> None: + """ + Add an edge between two vertices. + + :param from_vertex: The source vertex. + :param to_vertex: The destination vertex. + + Example: + >>> g = Graph() + >>> g.add_edge(0, 1) + >>> g.add_edge(0, 2) + >>> g.print_graph() + {0: [1, 2]} + 0 -> 1 -> 2 + """ # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(to_vertex) @@ -23,6 +55,21 @@ class Graph: self.vertex[from_vertex] = [to_vertex] def dfs(self) -> None: + """ + Perform depth-first search (DFS) traversal on the graph + and print the visited vertices. + + Example: + >>> g = Graph() + >>> g.add_edge(0, 1) + >>> g.add_edge(0, 2) + >>> g.add_edge(1, 2) + >>> g.add_edge(2, 0) + >>> g.add_edge(2, 3) + >>> g.add_edge(3, 3) + >>> g.dfs() + 0 1 2 3 + """ # visited array for storing already visited nodes visited = [False] * len(self.vertex) @@ -32,18 +79,41 @@ class Graph: self.dfs_recursive(i, visited) def dfs_recursive(self, start_vertex: int, visited: list) -> None: + """ + Perform a recursive depth-first search (DFS) traversal on the graph. + + :param start_vertex: The starting vertex for the traversal. + :param visited: A list to track visited vertices. 
+ + Example: + >>> g = Graph() + >>> g.add_edge(0, 1) + >>> g.add_edge(0, 2) + >>> g.add_edge(1, 2) + >>> g.add_edge(2, 0) + >>> g.add_edge(2, 3) + >>> g.add_edge(3, 3) + >>> visited = [False] * len(g.vertex) + >>> g.dfs_recursive(0, visited) + 0 1 2 3 + """ # mark start vertex as visited visited[start_vertex] = True - print(start_vertex, end=" ") + print(start_vertex, end="") # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: + print(" ", end="") self.dfs_recursive(i, visited) if __name__ == "__main__": + import doctest + + doctest.testmod() + g = Graph() g.add_edge(0, 1) g.add_edge(0, 2) @@ -55,11 +125,3 @@ if __name__ == "__main__": g.print_graph() print("DFS:") g.dfs() - - # OUTPUT: - # 0 -> 1 -> 2 - # 1 -> 2 - # 2 -> 0 -> 3 - # 3 -> 3 - # DFS: - # 0 1 2 3 From 69707bf6939d63a93b0d4b278cc367c42a976c6d Mon Sep 17 00:00:00 2001 From: Dwarkadhish Kamthane <72198604+dwarka-9504@users.noreply.github.com> Date: Mon, 16 Oct 2023 13:51:03 +0530 Subject: [PATCH 146/306] Minimization of while loop in Armstrong Numbers (#9976) * Minimization of while loop in Armstrong Numbers The while loop is removed and simple length calculation is used so the task of minimization of while loop is achieved * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/armstrong_numbers.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/maths/armstrong_numbers.py b/maths/armstrong_numbers.py index 26709b428..e1c25d467 100644 --- a/maths/armstrong_numbers.py +++ b/maths/armstrong_numbers.py @@ -29,9 +29,7 @@ def armstrong_number(n: int) -> bool: number_of_digits = 0 temp = n # Calculation of digits of the number - while temp > 0: - number_of_digits += 1 - temp //= 10 + number_of_digits = len(str(n)) # Dividing number into separate digits and find Armstrong number temp = n 
while temp > 0: From 7acf4bf73b5a43bdb375f7a34da227bf6deeaf35 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 16 Oct 2023 16:16:09 +0200 Subject: [PATCH 147/306] Rename binary_tree_traversals.md to README.md (#10599) --- DIRECTORY.md | 3 +-- .../binary_tree/{binary_tree_traversals.md => README.md} | 0 2 files changed, 1 insertion(+), 2 deletions(-) rename data_structures/binary_tree/{binary_tree_traversals.md => README.md} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index cef1e06b7..65628be59 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -340,7 +340,6 @@ * [Longest Increasing Subsequence](dynamic_programming/longest_increasing_subsequence.py) * [Longest Increasing Subsequence O(Nlogn)](dynamic_programming/longest_increasing_subsequence_o(nlogn).py) * [Longest Palindromic Subsequence](dynamic_programming/longest_palindromic_subsequence.py) - * [Longest Sub Array](dynamic_programming/longest_sub_array.py) * [Matrix Chain Multiplication](dynamic_programming/matrix_chain_multiplication.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) @@ -486,6 +485,7 @@ * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) * [Gas Station](greedy_methods/gas_station.py) + * [Minimum Coin Change](greedy_methods/minimum_coin_change.py) * [Minimum Waiting Time](greedy_methods/minimum_waiting_time.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) @@ -618,7 +618,6 @@ * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) - * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming Numbers](maths/hamming_numbers.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) * [Harshad Numbers](maths/harshad_numbers.py) diff --git a/data_structures/binary_tree/binary_tree_traversals.md 
b/data_structures/binary_tree/README.md similarity index 100% rename from data_structures/binary_tree/binary_tree_traversals.md rename to data_structures/binary_tree/README.md From 3923e590d77979de31fabd4df34e69e8933e690d Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 16 Oct 2023 16:17:48 +0200 Subject: [PATCH 148/306] Tree_sort.py: Disable slow doctest (#10584) --- sorts/tree_sort.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sorts/tree_sort.py b/sorts/tree_sort.py index e63a3253b..dc95856f4 100644 --- a/sorts/tree_sort.py +++ b/sorts/tree_sort.py @@ -52,8 +52,9 @@ def tree_sort(arr: list[int]) -> tuple[int, ...]: (-4, 2, 5, 7, 9) >>> tree_sort([5, 6, 1, -1, 4, 37, 2, 7]) (-1, 1, 2, 4, 5, 6, 7, 37) - >>> tree_sort(range(10, -10, -1)) == tuple(sorted(range(10, -10, -1))) - True + + # >>> tree_sort(range(10, -10, -1)) == tuple(sorted(range(10, -10, -1))) + # True """ if len(arr) == 0: return tuple(arr) From c15dda405a26bd9cb1554a43598c4c85a6320d4c Mon Sep 17 00:00:00 2001 From: Saswat Susmoy <72549122+Saswatsusmoy@users.noreply.github.com> Date: Mon, 16 Oct 2023 20:13:53 +0530 Subject: [PATCH 149/306] Update basic_binary_tree.py (#10388) * Update basic_binary_tree.py * Update basic_binary_tree.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../binary_tree/basic_binary_tree.py | 173 +++++++++--------- 1 file changed, 91 insertions(+), 82 deletions(-) diff --git a/data_structures/binary_tree/basic_binary_tree.py b/data_structures/binary_tree/basic_binary_tree.py index 65dccf247..0439413d9 100644 --- a/data_structures/binary_tree/basic_binary_tree.py +++ b/data_structures/binary_tree/basic_binary_tree.py @@ -1,101 +1,110 @@ from __future__ import annotations +from collections.abc import Iterator +from dataclasses import dataclass + 
+@dataclass class Node: - """ - A Node has data variable and pointers to Nodes to its left and right. - """ + data: int + left: Node | None = None + right: Node | None = None - def __init__(self, data: int) -> None: - self.data = data - self.left: Node | None = None - self.right: Node | None = None + def __iter__(self) -> Iterator[int]: + if self.left: + yield from self.left + yield self.data + if self.right: + yield from self.right + + def __len__(self) -> int: + return sum(1 for _ in self) + + def is_full(self) -> bool: + if not self or (not self.left and not self.right): + return True + if self.left and self.right: + return self.left.is_full() and self.right.is_full() + return False -def display(tree: Node | None) -> None: # In Order traversal of the tree - """ - >>> root = Node(1) - >>> root.left = Node(0) - >>> root.right = Node(2) - >>> display(root) - 0 - 1 - 2 - >>> display(root.right) - 2 - """ - if tree: - display(tree.left) - print(tree.data) - display(tree.right) +@dataclass +class BinaryTree: + root: Node + def __iter__(self) -> Iterator[int]: + return iter(self.root) -def depth_of_tree(tree: Node | None) -> int: - """ - Recursive function that returns the depth of a binary tree. + def __len__(self) -> int: + return len(self.root) - >>> root = Node(0) - >>> depth_of_tree(root) - 1 - >>> root.left = Node(0) - >>> depth_of_tree(root) - 2 - >>> root.right = Node(0) - >>> depth_of_tree(root) - 2 - >>> root.left.right = Node(0) - >>> depth_of_tree(root) - 3 - >>> depth_of_tree(root.left) - 2 - """ - return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0 + @classmethod + def small_tree(cls) -> BinaryTree: + """ + Return a small binary tree with 3 nodes. 
+ >>> binary_tree = BinaryTree.small_tree() + >>> len(binary_tree) + 3 + >>> list(binary_tree) + [1, 2, 3] + """ + binary_tree = BinaryTree(Node(2)) + binary_tree.root.left = Node(1) + binary_tree.root.right = Node(3) + return binary_tree + @classmethod + def medium_tree(cls) -> BinaryTree: + """ + Return a medium binary tree with 3 nodes. + >>> binary_tree = BinaryTree.medium_tree() + >>> len(binary_tree) + 7 + >>> list(binary_tree) + [1, 2, 3, 4, 5, 6, 7] + """ + binary_tree = BinaryTree(Node(4)) + binary_tree.root.left = two = Node(2) + two.left = Node(1) + two.right = Node(3) + binary_tree.root.right = five = Node(5) + five.right = six = Node(6) + six.right = Node(7) + return binary_tree -def is_full_binary_tree(tree: Node) -> bool: - """ - Returns True if this is a full binary tree + def depth(self) -> int: + """ + Returns the depth of the tree - >>> root = Node(0) - >>> is_full_binary_tree(root) - True - >>> root.left = Node(0) - >>> is_full_binary_tree(root) - False - >>> root.right = Node(0) - >>> is_full_binary_tree(root) - True - >>> root.left.left = Node(0) - >>> is_full_binary_tree(root) - False - >>> root.right.right = Node(0) - >>> is_full_binary_tree(root) - False - """ - if not tree: - return True - if tree.left and tree.right: - return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right) - else: - return not tree.left and not tree.right + >>> BinaryTree(Node(1)).depth() + 1 + >>> BinaryTree.small_tree().depth() + 2 + >>> BinaryTree.medium_tree().depth() + 4 + """ + return self._depth(self.root) + def _depth(self, node: Node | None) -> int: # noqa: UP007 + if not node: + return 0 + return 1 + max(self._depth(node.left), self._depth(node.right)) -def main() -> None: # Main function for testing. 
- tree = Node(1) - tree.left = Node(2) - tree.right = Node(3) - tree.left.left = Node(4) - tree.left.right = Node(5) - tree.left.right.left = Node(6) - tree.right.left = Node(7) - tree.right.left.left = Node(8) - tree.right.left.left.right = Node(9) + def is_full(self) -> bool: + """ + Returns True if the tree is full - print(is_full_binary_tree(tree)) - print(depth_of_tree(tree)) - print("Tree is: ") - display(tree) + >>> BinaryTree(Node(1)).is_full() + True + >>> BinaryTree.small_tree().is_full() + True + >>> BinaryTree.medium_tree().is_full() + False + """ + return self.root.is_full() if __name__ == "__main__": - main() + import doctest + + doctest.testmod() From 5a1305b6fe98808bf534c54e12ac64c1e4e4ce0f Mon Sep 17 00:00:00 2001 From: ivan53 Date: Mon, 16 Oct 2023 07:48:26 -0700 Subject: [PATCH 150/306] Fix benchmark to test with the provided number instead on 25 (#10587) --- bit_manipulation/count_number_of_one_bits.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bit_manipulation/count_number_of_one_bits.py b/bit_manipulation/count_number_of_one_bits.py index a1687503a..f0c9f9276 100644 --- a/bit_manipulation/count_number_of_one_bits.py +++ b/bit_manipulation/count_number_of_one_bits.py @@ -70,11 +70,13 @@ def benchmark() -> None: setup = "import __main__ as z" print(f"Benchmark when {number = }:") print(f"{get_set_bits_count_using_modulo_operator(number) = }") - timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup) + timing = timeit( + f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup + ) print(f"timeit() runs in {timing} seconds") print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }") timing = timeit( - "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", + f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})", setup=setup, ) print(f"timeit() runs in {timing} seconds") From 778e2010d6ae89c61a93672e49b86041b6ca1108 Mon Sep 17 00:00:00 2001 From: 
Vinayak Upadhyay Date: Mon, 16 Oct 2023 22:16:44 +0530 Subject: [PATCH 151/306] Added functionality to calculate the diameter of given binary tree (#10526) * Added code to find diameter of given binary tree * Modified diameter_of_binary_tree file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update diameter_of_binary_tree.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update diameter_of_binary_tree.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/diameter_of_binary_tree.py | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 data_structures/binary_tree/diameter_of_binary_tree.py diff --git a/data_structures/binary_tree/diameter_of_binary_tree.py b/data_structures/binary_tree/diameter_of_binary_tree.py new file mode 100644 index 000000000..bbe70b028 --- /dev/null +++ b/data_structures/binary_tree/diameter_of_binary_tree.py @@ -0,0 +1,72 @@ +""" +The diameter/width of a tree is defined as the number of nodes on the longest path +between two end nodes. 
+""" +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass +class Node: + data: int + left: Node | None = None + right: Node | None = None + + def depth(self) -> int: + """ + >>> root = Node(1) + >>> root.depth() + 1 + >>> root.left = Node(2) + >>> root.depth() + 2 + >>> root.left.depth() + 1 + >>> root.right = Node(3) + >>> root.depth() + 2 + """ + left_depth = self.left.depth() if self.left else 0 + right_depth = self.right.depth() if self.right else 0 + return max(left_depth, right_depth) + 1 + + def diameter(self) -> int: + """ + >>> root = Node(1) + >>> root.diameter() + 1 + >>> root.left = Node(2) + >>> root.diameter() + 2 + >>> root.left.diameter() + 1 + >>> root.right = Node(3) + >>> root.diameter() + 3 + """ + left_depth = self.left.depth() if self.left else 0 + right_depth = self.right.depth() if self.right else 0 + return left_depth + right_depth + 1 + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + root = Node(1) + root.left = Node(2) + root.right = Node(3) + root.left.left = Node(4) + root.left.right = Node(5) + r""" + Constructed binary tree is + 1 + / \ + 2 3 + / \ + 4 5 + """ + print(f"{root.diameter() = }") # 4 + print(f"{root.left.diameter() = }") # 3 + print(f"{root.right.diameter() = }") # 1 From 1e468c1028e407ea38bd7e9511dc0f3d0d45a6e0 Mon Sep 17 00:00:00 2001 From: halfhearted <99018821+Arunsiva003@users.noreply.github.com> Date: Mon, 16 Oct 2023 22:42:33 +0530 Subject: [PATCH 152/306] Floor and ceil in Binary search tree added (#10432) * earliest deadline first scheduling algo added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * earliest deadline first scheduling algo added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ceil and floor and bst * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ceil and floor and 
bst 2 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ceil and floor and bst 3 * Update and rename floor_ceil_in_bst.py to floor_and_ceiling.py * Delete scheduling/shortest_deadline_first.py --------- Co-authored-by: ArunSiva Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/floor_and_ceiling.py | 87 +++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 data_structures/binary_tree/floor_and_ceiling.py diff --git a/data_structures/binary_tree/floor_and_ceiling.py b/data_structures/binary_tree/floor_and_ceiling.py new file mode 100644 index 000000000..f8a1adbd9 --- /dev/null +++ b/data_structures/binary_tree/floor_and_ceiling.py @@ -0,0 +1,87 @@ +""" +In a binary search tree (BST): +* The floor of key 'k' is the maximum value that is smaller than or equal to 'k'. +* The ceiling of key 'k' is the minimum value that is greater than or equal to 'k'. + +Reference: +https://bit.ly/46uB0a2 + +Author : Arunkumar +Date : 14th October 2023 +""" +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class Node: + key: int + left: Node | None = None + right: Node | None = None + + def __iter__(self) -> Iterator[int]: + if self.left: + yield from self.left + yield self.key + if self.right: + yield from self.right + + def __len__(self) -> int: + return sum(1 for _ in self) + + +def floor_ceiling(root: Node | None, key: int) -> tuple[int | None, int | None]: + """ + Find the floor and ceiling values for a given key in a Binary Search Tree (BST). + + Args: + root: The root of the binary search tree. + key: The key for which to find the floor and ceiling. + + Returns: + A tuple containing the floor and ceiling values, respectively. 
+ + Examples: + >>> root = Node(10) + >>> root.left = Node(5) + >>> root.right = Node(20) + >>> root.left.left = Node(3) + >>> root.left.right = Node(7) + >>> root.right.left = Node(15) + >>> root.right.right = Node(25) + >>> tuple(root) + (3, 5, 7, 10, 15, 20, 25) + >>> floor_ceiling(root, 8) + (7, 10) + >>> floor_ceiling(root, 14) + (10, 15) + >>> floor_ceiling(root, -1) + (None, 3) + >>> floor_ceiling(root, 30) + (25, None) + """ + floor_val = None + ceiling_val = None + + while root: + if root.key == key: + floor_val = root.key + ceiling_val = root.key + break + + if key < root.key: + ceiling_val = root.key + root = root.left + else: + floor_val = root.key + root = root.right + + return floor_val, ceiling_val + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 922bbee80ce292ca27eee33d38e82ecf73e33dcd Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Oct 2023 20:23:33 +0200 Subject: [PATCH 153/306] [pre-commit.ci] pre-commit autoupdate (#10613) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/abravalheri/validate-pyproject: v0.14 → v0.15](https://github.com/abravalheri/validate-pyproject/compare/v0.14...v0.15) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 84f4a7770..b3def463d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -46,7 +46,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.14 + rev: v0.15 hooks: - id: validate-pyproject diff --git a/DIRECTORY.md b/DIRECTORY.md index 
65628be59..d878f1c79 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -192,10 +192,12 @@ * [Binary Tree Node Sum](data_structures/binary_tree/binary_tree_node_sum.py) * [Binary Tree Path Sum](data_structures/binary_tree/binary_tree_path_sum.py) * [Binary Tree Traversals](data_structures/binary_tree/binary_tree_traversals.py) + * [Diameter Of Binary Tree](data_structures/binary_tree/diameter_of_binary_tree.py) * [Diff Views Of Binary Tree](data_structures/binary_tree/diff_views_of_binary_tree.py) * [Distribute Coins](data_structures/binary_tree/distribute_coins.py) * [Fenwick Tree](data_structures/binary_tree/fenwick_tree.py) * [Flatten Binarytree To Linkedlist](data_structures/binary_tree/flatten_binarytree_to_linkedlist.py) + * [Floor And Ceiling](data_structures/binary_tree/floor_and_ceiling.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) * [Is Bst](data_structures/binary_tree/is_bst.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) From fcea18c9f0b68e2ba35c8f91bf0702d7c727c4df Mon Sep 17 00:00:00 2001 From: Adarsh Sidnal <97141741+Adarshsidnal@users.noreply.github.com> Date: Tue, 17 Oct 2023 04:26:14 +0530 Subject: [PATCH 154/306] Added an algorithm transfrom bst to greater sum tree (#9777) * Added an algorithm transfrom bst to greater sum tree * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename transform_bst_sum_tree.py to is_sum_tree.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/binary_tree/is_sum_tree.py | 161 +++++++++++++++++++++ 1 file changed, 161 insertions(+) create mode 100644 data_structures/binary_tree/is_sum_tree.py diff --git a/data_structures/binary_tree/is_sum_tree.py b/data_structures/binary_tree/is_sum_tree.py new file mode 100644 index 000000000..3f9cf1d56 --- /dev/null +++ 
b/data_structures/binary_tree/is_sum_tree.py @@ -0,0 +1,161 @@ +""" +Is a binary tree a sum tree where the value of every non-leaf node is equal to the sum +of the values of its left and right subtrees? +https://www.geeksforgeeks.org/check-if-a-given-binary-tree-is-sumtree +""" +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class Node: + data: int + left: Node | None = None + right: Node | None = None + + def __iter__(self) -> Iterator[int]: + """ + >>> root = Node(2) + >>> list(root) + [2] + >>> root.left = Node(1) + >>> tuple(root) + (1, 2) + """ + if self.left: + yield from self.left + yield self.data + if self.right: + yield from self.right + + def __len__(self) -> int: + """ + >>> root = Node(2) + >>> len(root) + 1 + >>> root.left = Node(1) + >>> len(root) + 2 + """ + return sum(1 for _ in self) + + @property + def is_sum_node(self) -> bool: + """ + >>> root = Node(3) + >>> root.is_sum_node + True + >>> root.left = Node(1) + >>> root.is_sum_node + False + >>> root.right = Node(2) + >>> root.is_sum_node + True + """ + if not self.left and not self.right: + return True # leaf nodes are considered sum nodes + left_sum = sum(self.left) if self.left else 0 + right_sum = sum(self.right) if self.right else 0 + return all( + ( + self.data == left_sum + right_sum, + self.left.is_sum_node if self.left else True, + self.right.is_sum_node if self.right else True, + ) + ) + + +@dataclass +class BinaryTree: + root: Node + + def __iter__(self) -> Iterator[int]: + """ + >>> list(BinaryTree.build_a_tree()) + [1, 2, 7, 11, 15, 29, 35, 40] + """ + return iter(self.root) + + def __len__(self) -> int: + """ + >>> len(BinaryTree.build_a_tree()) + 8 + """ + return len(self.root) + + def __str__(self) -> str: + """ + Returns a string representation of the inorder traversal of the binary tree. 
+ + >>> str(list(BinaryTree.build_a_tree())) + '[1, 2, 7, 11, 15, 29, 35, 40]' + """ + return str(list(self)) + + @property + def is_sum_tree(self) -> bool: + """ + >>> BinaryTree.build_a_tree().is_sum_tree + False + >>> BinaryTree.build_a_sum_tree().is_sum_tree + True + """ + return self.root.is_sum_node + + @classmethod + def build_a_tree(cls) -> BinaryTree: + r""" + Create a binary tree with the specified structure: + 11 + / \ + 2 29 + / \ / \ + 1 7 15 40 + \ + 35 + >>> list(BinaryTree.build_a_tree()) + [1, 2, 7, 11, 15, 29, 35, 40] + """ + tree = BinaryTree(Node(11)) + root = tree.root + root.left = Node(2) + root.right = Node(29) + root.left.left = Node(1) + root.left.right = Node(7) + root.right.left = Node(15) + root.right.right = Node(40) + root.right.right.left = Node(35) + return tree + + @classmethod + def build_a_sum_tree(cls) -> BinaryTree: + r""" + Create a binary tree with the specified structure: + 26 + / \ + 10 3 + / \ \ + 4 6 3 + >>> list(BinaryTree.build_a_sum_tree()) + [4, 10, 6, 26, 3, 3] + """ + tree = BinaryTree(Node(26)) + root = tree.root + root.left = Node(10) + root.right = Node(3) + root.left.left = Node(4) + root.left.right = Node(6) + root.right.right = Node(3) + return tree + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + tree = BinaryTree.build_a_tree() + print(f"{tree} has {len(tree)} nodes and {tree.is_sum_tree = }.") + tree = BinaryTree.build_a_sum_tree() + print(f"{tree} has {len(tree)} nodes and {tree.is_sum_tree = }.") From 5f629b60499cfb3ac27f6520bf947764b5b45c28 Mon Sep 17 00:00:00 2001 From: Sandeepa Dilshan Alagiyawanna <108791571+SandeepaDilshanAlagiyawanna@users.noreply.github.com> Date: Tue, 17 Oct 2023 04:47:49 +0530 Subject: [PATCH 155/306] Optimize and_gate and nand_gate (#10591) * Added more optimized sudoku solver algorithm * Added more optimized sudoku solver algorithm and File Renamed * and_gate is Optimized * and_gate is Optimized * and_gate is Optimized * [pre-commit.ci] auto fixes 
from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- boolean_algebra/and_gate.py | 2 +- boolean_algebra/nand_gate.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/boolean_algebra/and_gate.py b/boolean_algebra/and_gate.py index f0fd45c9f..6ae66b5b0 100644 --- a/boolean_algebra/and_gate.py +++ b/boolean_algebra/and_gate.py @@ -29,7 +29,7 @@ def and_gate(input_1: int, input_2: int) -> int: >>> and_gate(1, 1) 1 """ - return int((input_1, input_2).count(0) == 0) + return int(input_1 and input_2) if __name__ == "__main__": diff --git a/boolean_algebra/nand_gate.py b/boolean_algebra/nand_gate.py index 80f9d12db..ea7a6815d 100644 --- a/boolean_algebra/nand_gate.py +++ b/boolean_algebra/nand_gate.py @@ -27,7 +27,7 @@ def nand_gate(input_1: int, input_2: int) -> int: >>> nand_gate(1, 1) 0 """ - return int((input_1, input_2).count(0) != 0) + return int(not (input_1 and input_2)) if __name__ == "__main__": From b5786c87d820cc4d68707731df0812507063bf8b Mon Sep 17 00:00:00 2001 From: aryandgandhi <44215148+aryandgandhi@users.noreply.github.com> Date: Mon, 16 Oct 2023 20:25:07 -0500 Subject: [PATCH 156/306] update segmenttree docstrings Fixes #9943 (#9975) * update docstrings * update docstrings * update docstrings --- data_structures/binary_tree/segment_tree.py | 41 ++++++++++++++++++++- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/data_structures/binary_tree/segment_tree.py b/data_structures/binary_tree/segment_tree.py index 5f822407d..3b0b32946 100644 --- a/data_structures/binary_tree/segment_tree.py +++ b/data_structures/binary_tree/segment_tree.py @@ -3,7 +3,8 @@ import math class SegmentTree: def __init__(self, a): - self.N = len(a) + self.A = a + self.N = len(self.A) self.st = [0] * ( 4 * self.N ) # approximate the overall size of segment tree with array N @@ -11,14 +12,32 @@ class SegmentTree: self.build(1, 0, 
self.N - 1) def left(self, idx): + """ + Returns the left child index for a given index in a binary tree. + + >>> s = SegmentTree([1, 2, 3]) + >>> s.left(1) + 2 + >>> s.left(2) + 4 + """ return idx * 2 def right(self, idx): + """ + Returns the right child index for a given index in a binary tree. + + >>> s = SegmentTree([1, 2, 3]) + >>> s.right(1) + 3 + >>> s.right(2) + 5 + """ return idx * 2 + 1 def build(self, idx, l, r): # noqa: E741 if l == r: - self.st[idx] = A[l] + self.st[idx] = self.A[l] else: mid = (l + r) // 2 self.build(self.left(idx), l, mid) @@ -26,6 +45,15 @@ class SegmentTree: self.st[idx] = max(self.st[self.left(idx)], self.st[self.right(idx)]) def update(self, a, b, val): + """ + Update the values in the segment tree in the range [a,b] with the given value. + + >>> s = SegmentTree([1, 2, 3, 4, 5]) + >>> s.update(2, 4, 10) + True + >>> s.query(1, 5) + 10 + """ return self.update_recursive(1, 0, self.N - 1, a - 1, b - 1, val) def update_recursive(self, idx, l, r, a, b, val): # noqa: E741 @@ -44,6 +72,15 @@ class SegmentTree: return True def query(self, a, b): + """ + Query the maximum value in the range [a,b]. 
+ + >>> s = SegmentTree([1, 2, 3, 4, 5]) + >>> s.query(1, 3) + 3 + >>> s.query(1, 5) + 5 + """ return self.query_recursive(1, 0, self.N - 1, a - 1, b - 1) def query_recursive(self, idx, l, r, a, b): # noqa: E741 From 00165a5fb2d125c7e6ab33e424bdcac8dec2b5b6 Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Tue, 17 Oct 2023 12:06:12 +0530 Subject: [PATCH 157/306] Added test cases to join.py (#10629) * Added test cases to join.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/join.py | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/strings/join.py b/strings/join.py index 739856c1a..5c02f65a2 100644 --- a/strings/join.py +++ b/strings/join.py @@ -1,10 +1,21 @@ """ -Program to join a list of strings with a given separator +Program to join a list of strings with a separator """ def join(separator: str, separated: list[str]) -> str: """ + Joins a list of strings using a separator + and returns the result. + + :param separator: Separator to be used + for joining the strings. + :param separated: List of strings to be joined. + + :return: Joined string with the specified separator. + + Examples: + >>> join("", ["a", "b", "c", "d"]) 'abcd' >>> join("#", ["a", "b", "c", "d"]) @@ -13,16 +24,27 @@ def join(separator: str, separated: list[str]) -> str: 'a' >>> join(" ", ["You", "are", "amazing!"]) 'You are amazing!' + + This example should raise an + exception for non-string elements: >>> join("#", ["a", "b", "c", 1]) Traceback (most recent call last): ... 
- Exception: join() accepts only strings to be joined + Exception: join() accepts only strings + + Additional test case with a different separator: + >>> join("-", ["apple", "banana", "cherry"]) + 'apple-banana-cherry' """ + joined = "" for word_or_phrase in separated: if not isinstance(word_or_phrase, str): - raise Exception("join() accepts only strings to be joined") + raise Exception("join() accepts only strings") joined += word_or_phrase + separator + + # Remove the trailing separator + # by stripping it from the result return joined.strip(separator) From c6c3bd339947eb6f10f77754f34a49915799c82f Mon Sep 17 00:00:00 2001 From: Kushagra Agarwal <94402194+developer-kush@users.noreply.github.com> Date: Tue, 17 Oct 2023 12:40:24 +0530 Subject: [PATCH 158/306] Hacktoberfest: Added Octal Number to Hexadecimal Number Conversion Algorithm (#10533) * Added Octal to Hexadecimal Conversion program under 'conversions' directory * Update conversions/octal_to_hexadecimal.py fix: minor improvement to directly return hexadecimal value Co-authored-by: Tianyi Zheng * Update conversions/octal_to_hexadecimal.py fix: improvement updates to octal to hexadecimal Co-authored-by: Tianyi Zheng * Update conversions/octal_to_hexadecimal.py fix: Readablility improvements to octal to hexadecimal convertor Co-authored-by: Tianyi Zheng * Update conversions/octal_to_hexadecimal.py fix: readability improvements in octal_to_hexadecimal.py Co-authored-by: Tianyi Zheng * Update conversions/octal_to_hexadecimal.py fix: readability improvements in octal_to_hexadecimal.py Co-authored-by: Tianyi Zheng * fix: Fixed all the errors in octal_to_hexadecimal.py after commiting suggested changes * fix: modified the prefix of hex numbers to the '0x' standard in octal_to_hexadecimal.py --------- Co-authored-by: Tianyi Zheng --- conversions/octal_to_hexadecimal.py | 65 +++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 conversions/octal_to_hexadecimal.py diff --git 
a/conversions/octal_to_hexadecimal.py b/conversions/octal_to_hexadecimal.py new file mode 100644 index 000000000..0615d79b5 --- /dev/null +++ b/conversions/octal_to_hexadecimal.py @@ -0,0 +1,65 @@ +def octal_to_hex(octal: str) -> str: + """ + Convert an Octal number to Hexadecimal number. + For more information: https://en.wikipedia.org/wiki/Octal + + >>> octal_to_hex("100") + '0x40' + >>> octal_to_hex("235") + '0x9D' + >>> octal_to_hex(17) + Traceback (most recent call last): + ... + TypeError: Expected a string as input + >>> octal_to_hex("Av") + Traceback (most recent call last): + ... + ValueError: Not a Valid Octal Number + >>> octal_to_hex("") + Traceback (most recent call last): + ... + ValueError: Empty string was passed to the function + """ + + if not isinstance(octal, str): + raise TypeError("Expected a string as input") + if octal.startswith("0o"): + octal = octal[2:] + if octal == "": + raise ValueError("Empty string was passed to the function") + if any(char not in "01234567" for char in octal): + raise ValueError("Not a Valid Octal Number") + + decimal = 0 + for char in octal: + decimal <<= 3 + decimal |= int(char) + + hex_char = "0123456789ABCDEF" + + revhex = "" + while decimal: + revhex += hex_char[decimal & 15] + decimal >>= 4 + + return "0x" + revhex[::-1] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + nums = ["030", "100", "247", "235", "007"] + + ## Main Tests + + for num in nums: + hexadecimal = octal_to_hex(num) + expected = "0x" + hex(int(num, 8))[2:].upper() + + assert hexadecimal == expected + + print(f"Hex of '0o{num}' is: {hexadecimal}") + print(f"Expected was: {expected}") + print("---") From ac3bd1032c02ff5c2f6eb16f2bf5a1b24d106d1c Mon Sep 17 00:00:00 2001 From: ojas wani <52542740+ojas-wani@users.noreply.github.com> Date: Tue, 17 Oct 2023 02:25:25 -0700 Subject: [PATCH 159/306] Add matrix_multiplication (#10045) * added laplacian_filter file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more 
information, see https://pre-commit.ci * updated laplacian.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * required changes to laplacian file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian_filter.py * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * changed laplacian_filter.py * changed laplacian_filter.py * add matrix_multiplication.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for 
more information, see https://pre-commit.ci * update matrix_multiplication * update matrix_multiplication * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * make changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * update * updates * resolve conflict * add doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * make changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian.py * add doctests * more doctest added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * try to resolve ruff error * try to reslve ruff error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update doctest * attemp - resolve ruff error * resolve build error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolve build issue * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update build * doctest update * update doctest * update doctest * update doctest * fix ruff error * file location changed * Delete digital_image_processing/filters/laplacian_filter.py * Create laplacian_filter.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- matrix/matrix_multiplication_recursion.py | 180 ++++++++++++++++++++++ 1 file changed, 180 insertions(+) create mode 100644 matrix/matrix_multiplication_recursion.py diff --git 
a/matrix/matrix_multiplication_recursion.py b/matrix/matrix_multiplication_recursion.py new file mode 100644 index 000000000..287142480 --- /dev/null +++ b/matrix/matrix_multiplication_recursion.py @@ -0,0 +1,180 @@ +# @Author : ojas-wani +# @File : matrix_multiplication_recursion.py +# @Date : 10/06/2023 + + +""" +Perform matrix multiplication using a recursive algorithm. +https://en.wikipedia.org/wiki/Matrix_multiplication +""" +# type Matrix = list[list[int]] # psf/black currenttly fails on this line +Matrix = list[list[int]] + +matrix_1_to_4 = [ + [1, 2], + [3, 4], +] + +matrix_5_to_8 = [ + [5, 6], + [7, 8], +] + +matrix_5_to_9_high = [ + [5, 6], + [7, 8], + [9], +] + +matrix_5_to_9_wide = [ + [5, 6], + [7, 8, 9], +] + +matrix_count_up = [ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12], + [13, 14, 15, 16], +] + +matrix_unordered = [ + [5, 8, 1, 2], + [6, 7, 3, 0], + [4, 5, 9, 1], + [2, 6, 10, 14], +] +matrices = ( + matrix_1_to_4, + matrix_5_to_8, + matrix_5_to_9_high, + matrix_5_to_9_wide, + matrix_count_up, + matrix_unordered, +) + + +def is_square(matrix: Matrix) -> bool: + """ + >>> is_square([]) + True + >>> is_square(matrix_1_to_4) + True + >>> is_square(matrix_5_to_9_high) + False + """ + len_matrix = len(matrix) + return all(len(row) == len_matrix for row in matrix) + + +def matrix_multiply(matrix_a: Matrix, matrix_b: Matrix) -> Matrix: + """ + >>> matrix_multiply(matrix_1_to_4, matrix_5_to_8) + [[19, 22], [43, 50]] + """ + return [ + [sum(a * b for a, b in zip(row, col)) for col in zip(*matrix_b)] + for row in matrix_a + ] + + +def matrix_multiply_recursive(matrix_a: Matrix, matrix_b: Matrix) -> Matrix: + """ + :param matrix_a: A square Matrix. + :param matrix_b: Another square Matrix with the same dimensions as matrix_a. + :return: Result of matrix_a * matrix_b. + :raises ValueError: If the matrices cannot be multiplied. 
+ + >>> matrix_multiply_recursive([], []) + [] + >>> matrix_multiply_recursive(matrix_1_to_4, matrix_5_to_8) + [[19, 22], [43, 50]] + >>> matrix_multiply_recursive(matrix_count_up, matrix_unordered) + [[37, 61, 74, 61], [105, 165, 166, 129], [173, 269, 258, 197], [241, 373, 350, 265]] + >>> matrix_multiply_recursive(matrix_1_to_4, matrix_5_to_9_wide) + Traceback (most recent call last): + ... + ValueError: Invalid matrix dimensions + >>> matrix_multiply_recursive(matrix_1_to_4, matrix_5_to_9_high) + Traceback (most recent call last): + ... + ValueError: Invalid matrix dimensions + >>> matrix_multiply_recursive(matrix_1_to_4, matrix_count_up) + Traceback (most recent call last): + ... + ValueError: Invalid matrix dimensions + """ + if not matrix_a or not matrix_b: + return [] + if not all( + (len(matrix_a) == len(matrix_b), is_square(matrix_a), is_square(matrix_b)) + ): + raise ValueError("Invalid matrix dimensions") + + # Initialize the result matrix with zeros + result = [[0] * len(matrix_b[0]) for _ in range(len(matrix_a))] + + # Recursive multiplication of matrices + def multiply( + i_loop: int, + j_loop: int, + k_loop: int, + matrix_a: Matrix, + matrix_b: Matrix, + result: Matrix, + ) -> None: + """ + :param matrix_a: A square Matrix. + :param matrix_b: Another square Matrix with the same dimensions as matrix_a. + :param result: Result matrix + :param i: Index used for iteration during multiplication. + :param j: Index used for iteration during multiplication. + :param k: Index used for iteration during multiplication. 
+ >>> 0 > 1 # Doctests in inner functions are never run + True + """ + if i_loop >= len(matrix_a): + return + if j_loop >= len(matrix_b[0]): + return multiply(i_loop + 1, 0, 0, matrix_a, matrix_b, result) + if k_loop >= len(matrix_b): + return multiply(i_loop, j_loop + 1, 0, matrix_a, matrix_b, result) + result[i_loop][j_loop] += matrix_a[i_loop][k_loop] * matrix_b[k_loop][j_loop] + return multiply(i_loop, j_loop, k_loop + 1, matrix_a, matrix_b, result) + + # Perform the recursive matrix multiplication + multiply(0, 0, 0, matrix_a, matrix_b, result) + return result + + +if __name__ == "__main__": + from doctest import testmod + + failure_count, test_count = testmod() + if not failure_count: + matrix_a = matrices[0] + for matrix_b in matrices[1:]: + print("Multiplying:") + for row in matrix_a: + print(row) + print("By:") + for row in matrix_b: + print(row) + print("Result:") + try: + result = matrix_multiply_recursive(matrix_a, matrix_b) + for row in result: + print(row) + assert result == matrix_multiply(matrix_a, matrix_b) + except ValueError as e: + print(f"{e!r}") + print() + matrix_a = matrix_b + + print("Benchmark:") + from functools import partial + from timeit import timeit + + mytimeit = partial(timeit, globals=globals(), number=100_000) + for func in ("matrix_multiply", "matrix_multiply_recursive"): + print(f"{func:>25}(): {mytimeit(f'{func}(matrix_count_up, matrix_unordered)')}") From 72bd653e04a944f51ae6c047204b62d8a07db9d4 Mon Sep 17 00:00:00 2001 From: RaymondDashWu <33266041+RaymondDashWu@users.noreply.github.com> Date: Tue, 17 Oct 2023 07:57:33 -0700 Subject: [PATCH 160/306] Test cases for all_combinations (#10633) * [ADD] Test cases for all_combinations * [DEL] documentation reverted b/c redundant * Update all_combinations.py --------- Co-authored-by: Christian Clauss --- backtracking/all_combinations.py | 49 +++++++++++++++++++++++++------- 1 file changed, 39 insertions(+), 10 deletions(-) diff --git a/backtracking/all_combinations.py 
b/backtracking/all_combinations.py index bde60f032..ecbcc5882 100644 --- a/backtracking/all_combinations.py +++ b/backtracking/all_combinations.py @@ -1,15 +1,40 @@ """ In this problem, we want to determine all possible combinations of k numbers out of 1 ... n. We use backtracking to solve this problem. - Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!))) + + Time complexity: O(C(n,k)) which is O(n choose k) = O((n!/(k! * (n - k)!))), """ from __future__ import annotations +from itertools import combinations + + +def combination_lists(n: int, k: int) -> list[list[int]]: + """ + >>> combination_lists(n=4, k=2) + [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] + """ + return [list(x) for x in combinations(range(1, n + 1), k)] + def generate_all_combinations(n: int, k: int) -> list[list[int]]: """ >>> generate_all_combinations(n=4, k=2) [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]] + >>> generate_all_combinations(n=0, k=0) + [[]] + >>> generate_all_combinations(n=10, k=-1) + Traceback (most recent call last): + ... + RecursionError: maximum recursion depth exceeded + >>> generate_all_combinations(n=-1, k=10) + [] + >>> generate_all_combinations(n=5, k=4) + [[1, 2, 3, 4], [1, 2, 3, 5], [1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]] + >>> from itertools import combinations + >>> all(generate_all_combinations(n, k) == combination_lists(n, k) + ... 
for n in range(1, 6) for k in range(1, 6)) + True """ result: list[list[int]] = [] @@ -34,13 +59,17 @@ def create_all_state( current_list.pop() -def print_all_state(total_list: list[list[int]]) -> None: - for i in total_list: - print(*i) - - if __name__ == "__main__": - n = 4 - k = 2 - total_list = generate_all_combinations(n, k) - print_all_state(total_list) + from doctest import testmod + + testmod() + print(generate_all_combinations(n=4, k=2)) + tests = ((n, k) for n in range(1, 5) for k in range(1, 5)) + for n, k in tests: + print(n, k, generate_all_combinations(n, k) == combination_lists(n, k)) + + print("Benchmark:") + from timeit import timeit + + for func in ("combination_lists", "generate_all_combinations"): + print(f"{func:>25}(): {timeit(f'{func}(n=4, k = 2)', globals=globals())}") From 09c2b2d006e3ca217f2ef082d62a0c35560667ef Mon Sep 17 00:00:00 2001 From: Anubhavpandey27 <61093307+Anubhavpandey27@users.noreply.github.com> Date: Tue, 17 Oct 2023 22:37:40 +0530 Subject: [PATCH 161/306] Add arrays/sudoku_solver.py (#10623) * Create Sudoku_Solver Each of the digits 1-9 must occur exactly once in each row. Each of the digits 1-9 must occur exactly once in each column. Each of the digits 1-9 must occur exactly once in each of the 9 3x3 sub-boxes of the grid. The '.' character indicates empty cells. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Rename Sudoku_Solver to sudoku_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update sudoku_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/arrays/sudoku_solver.py | 220 ++++++++++++++++++++++++ 1 file changed, 220 insertions(+) create mode 100644 data_structures/arrays/sudoku_solver.py diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py new file mode 100644 index 000000000..8d38bd729 --- /dev/null +++ b/data_structures/arrays/sudoku_solver.py @@ -0,0 +1,220 @@ +""" +Please do not modify this file! It is published at https://norvig.com/sudoku.html with +only minimal changes to work with modern versions of Python. If you have improvements, +please make them in a separate file. +""" +import random +import time + + +def cross(items_a, items_b): + "Cross product of elements in A and elements in B." + return [a + b for a in items_a for b in items_b] + + +digits = "123456789" +rows = "ABCDEFGHI" +cols = digits +squares = cross(rows, cols) +unitlist = ( + [cross(rows, c) for c in cols] + + [cross(r, cols) for r in rows] + + [cross(rs, cs) for rs in ("ABC", "DEF", "GHI") for cs in ("123", "456", "789")] +) +units = {s: [u for u in unitlist if s in u] for s in squares} +peers = {s: set(sum(units[s], [])) - {s} for s in squares} + + +def test(): + "A set of unit tests." 
+ assert len(squares) == 81 + assert len(unitlist) == 27 + assert all(len(units[s]) == 3 for s in squares) + assert all(len(peers[s]) == 20 for s in squares) + assert units["C2"] == [ + ["A2", "B2", "C2", "D2", "E2", "F2", "G2", "H2", "I2"], + ["C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"], + ["A1", "A2", "A3", "B1", "B2", "B3", "C1", "C2", "C3"], + ] + # fmt: off + assert peers["C2"] == { + "A2", "B2", "D2", "E2", "F2", "G2", "H2", "I2", "C1", "C3", + "C4", "C5", "C6", "C7", "C8", "C9", "A1", "A3", "B1", "B3" + } + # fmt: on + print("All tests pass.") + + +def parse_grid(grid): + """Convert grid to a dict of possible values, {square: digits}, or + return False if a contradiction is detected.""" + ## To start, every square can be any digit; then assign values from the grid. + values = {s: digits for s in squares} + for s, d in grid_values(grid).items(): + if d in digits and not assign(values, s, d): + return False ## (Fail if we can't assign d to square s.) + return values + + +def grid_values(grid): + "Convert grid into a dict of {square: char} with '0' or '.' for empties." + chars = [c for c in grid if c in digits or c in "0."] + assert len(chars) == 81 + return dict(zip(squares, chars)) + + +def assign(values, s, d): + """Eliminate all the other values (except d) from values[s] and propagate. + Return values, except return False if a contradiction is detected.""" + other_values = values[s].replace(d, "") + if all(eliminate(values, s, d2) for d2 in other_values): + return values + else: + return False + + +def eliminate(values, s, d): + """Eliminate d from values[s]; propagate when values or places <= 2. + Return values, except return False if a contradiction is detected.""" + if d not in values[s]: + return values ## Already eliminated + values[s] = values[s].replace(d, "") + ## (1) If a square s is reduced to one value d2, then eliminate d2 from the peers. 
+ if len(values[s]) == 0: + return False ## Contradiction: removed last value + elif len(values[s]) == 1: + d2 = values[s] + if not all(eliminate(values, s2, d2) for s2 in peers[s]): + return False + ## (2) If a unit u is reduced to only one place for a value d, then put it there. + for u in units[s]: + dplaces = [s for s in u if d in values[s]] + if len(dplaces) == 0: + return False ## Contradiction: no place for this value + elif len(dplaces) == 1: + # d can only be in one place in unit; assign it there + if not assign(values, dplaces[0], d): + return False + return values + + +def display(values): + "Display these values as a 2-D grid." + width = 1 + max(len(values[s]) for s in squares) + line = "+".join(["-" * (width * 3)] * 3) + for r in rows: + print( + "".join( + values[r + c].center(width) + ("|" if c in "36" else "") for c in cols + ) + ) + if r in "CF": + print(line) + print() + + +def solve(grid): + return search(parse_grid(grid)) + + +def some(seq): + "Return some element of seq that is true." + for e in seq: + if e: + return e + return False + + +def search(values): + "Using depth-first search and propagation, try all possible values." + if values is False: + return False ## Failed earlier + if all(len(values[s]) == 1 for s in squares): + return values ## Solved! + ## Chose the unfilled square s with the fewest possibilities + n, s = min((len(values[s]), s) for s in squares if len(values[s]) > 1) + return some(search(assign(values.copy(), s, d)) for d in values[s]) + + +def solve_all(grids, name="", showif=0.0): + """Attempt to solve a sequence of grids. Report results. + When showif is a number of seconds, display puzzles that take longer. 
+ When showif is None, don't display any puzzles.""" + + def time_solve(grid): + start = time.monotonic() + values = solve(grid) + t = time.monotonic() - start + ## Display puzzles that take long enough + if showif is not None and t > showif: + display(grid_values(grid)) + if values: + display(values) + print("(%.5f seconds)\n" % t) + return (t, solved(values)) + + times, results = zip(*[time_solve(grid) for grid in grids]) + if (n := len(grids)) > 1: + print( + "Solved %d of %d %s puzzles (avg %.2f secs (%d Hz), max %.2f secs)." + % (sum(results), n, name, sum(times) / n, n / sum(times), max(times)) + ) + + +def solved(values): + "A puzzle is solved if each unit is a permutation of the digits 1 to 9." + + def unitsolved(unit): + return {values[s] for s in unit} == set(digits) + + return values is not False and all(unitsolved(unit) for unit in unitlist) + + +def from_file(filename, sep="\n"): + "Parse a file into a list of strings, separated by sep." + return open(filename).read().strip().split(sep) # noqa: SIM115 + + +def random_puzzle(assignments=17): + """Make a random puzzle with N or more assignments. Restart on contradictions. + Note the resulting puzzle is not guaranteed to be solvable, but empirically + about 99.8% of them are solvable. Some have multiple solutions.""" + values = {s: digits for s in squares} + for s in shuffled(squares): + if not assign(values, s, random.choice(values[s])): + break + ds = [values[s] for s in squares if len(values[s]) == 1] + if len(ds) >= assignments and len(set(ds)) >= 8: + return "".join(values[s] if len(values[s]) == 1 else "." for s in squares) + return random_puzzle(assignments) ## Give up and make a new puzzle + + +def shuffled(seq): + "Return a randomly shuffled copy of the input sequence." 
+ seq = list(seq) + random.shuffle(seq) + return seq + + +grid1 = ( + "003020600900305001001806400008102900700000008006708200002609500800203009005010300" +) +grid2 = ( + "4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......" +) +hard1 = ( + ".....6....59.....82....8....45........3........6..3.54...325..6.................." +) + +if __name__ == "__main__": + test() + # solve_all(from_file("easy50.txt", '========'), "easy", None) + # solve_all(from_file("top95.txt"), "hard", None) + # solve_all(from_file("hardest.txt"), "hardest", None) + solve_all([random_puzzle() for _ in range(99)], "random", 100.0) + for puzzle in (grid1, grid2): # , hard1): # Takes 22 sec to solve on my M1 Mac. + display(parse_grid(puzzle)) + start = time.monotonic() + solve(puzzle) + t = time.monotonic() - start + print("Solved: %.5f sec" % t) From 9de1c49fe13f009e08dcf5009a798bef43f2230b Mon Sep 17 00:00:00 2001 From: Marek Mazij <112333347+Mrk-Mzj@users.noreply.github.com> Date: Tue, 17 Oct 2023 20:24:16 +0200 Subject: [PATCH 162/306] feat: Polish ID (PESEL) checker added (#10618) * feat: Polish ID (PESEL) checker added * refactor: 'sum' variable renamed to 'subtotal' * style: typos * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/is_polish_national_id.py | 92 ++++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 strings/is_polish_national_id.py diff --git a/strings/is_polish_national_id.py b/strings/is_polish_national_id.py new file mode 100644 index 000000000..8b463a245 --- /dev/null +++ b/strings/is_polish_national_id.py @@ -0,0 +1,92 @@ +def is_polish_national_id(input_str: str) -> bool: + """ + Verification of the correctness of the PESEL number. 
+ www-gov-pl.translate.goog/web/gov/czym-jest-numer-pesel?_x_tr_sl=auto&_x_tr_tl=en + + PESEL can start with 0, that's why we take str as input, + but convert it to int for some calculations. + + + >>> is_polish_national_id(123) + Traceback (most recent call last): + ... + ValueError: Expected str as input, found + + >>> is_polish_national_id("abc") + Traceback (most recent call last): + ... + ValueError: Expected number as input + + >>> is_polish_national_id("02070803628") # correct PESEL + True + + >>> is_polish_national_id("02150803629") # wrong month + False + + >>> is_polish_national_id("02075503622") # wrong day + False + + >>> is_polish_national_id("-99012212349") # wrong range + False + + >>> is_polish_national_id("990122123499999") # wrong range + False + + >>> is_polish_national_id("02070803621") # wrong checksum + False + """ + + # check for invalid input type + if not isinstance(input_str, str): + msg = f"Expected str as input, found {type(input_str)}" + raise ValueError(msg) + + # check if input can be converted to int + try: + input_int = int(input_str) + except ValueError: + msg = "Expected number as input" + raise ValueError(msg) + + # check number range + if not 10100000 <= input_int <= 99923199999: + return False + + # check month correctness + month = int(input_str[2:4]) + + if ( + month not in range(1, 13) # year 1900-1999 + and month not in range(21, 33) # 2000-2099 + and month not in range(41, 53) # 2100-2199 + and month not in range(61, 73) # 2200-2299 + and month not in range(81, 93) # 1800-1899 + ): + return False + + # check day correctness + day = int(input_str[4:6]) + + if day not in range(1, 32): + return False + + # check the checksum + multipliers = [1, 3, 7, 9, 1, 3, 7, 9, 1, 3] + subtotal = 0 + + digits_to_check = str(input_str)[:-1] # cut off the checksum + + for index, digit in enumerate(digits_to_check): + # Multiply corresponding digits and multipliers. + # In case of a double-digit result, add only the last digit. 
+ subtotal += (int(digit) * multipliers[index]) % 10 + + checksum = 10 - subtotal % 10 + + return checksum == input_int % 10 + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 9da6f48b46f41c6361416c259dcfec531fb39a01 Mon Sep 17 00:00:00 2001 From: Manmita Das <34617961+manmita@users.noreply.github.com> Date: Wed, 18 Oct 2023 04:07:57 +0530 Subject: [PATCH 163/306] Add binary_coded_decimal.py (#10656) * added decimal to bcd sequence * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated with fixes * Update and rename bcd_sequence.py to binary_coded_decimal.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binary_coded_decimal.py * Update binary_coded_decimal.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- bit_manipulation/binary_coded_decimal.py | 29 ++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 bit_manipulation/binary_coded_decimal.py diff --git a/bit_manipulation/binary_coded_decimal.py b/bit_manipulation/binary_coded_decimal.py new file mode 100644 index 000000000..676fd6d54 --- /dev/null +++ b/bit_manipulation/binary_coded_decimal.py @@ -0,0 +1,29 @@ +def binary_coded_decimal(number: int) -> str: + """ + Find binary coded decimal (bcd) of integer base 10. + Each digit of the number is represented by a 4-bit binary. 
+ Example: + >>> binary_coded_decimal(-2) + '0b0000' + >>> binary_coded_decimal(-1) + '0b0000' + >>> binary_coded_decimal(0) + '0b0000' + >>> binary_coded_decimal(3) + '0b0011' + >>> binary_coded_decimal(2) + '0b0010' + >>> binary_coded_decimal(12) + '0b00010010' + >>> binary_coded_decimal(987) + '0b100110000111' + """ + return "0b" + "".join( + str(bin(int(digit)))[2:].zfill(4) for digit in str(max(0, number)) + ) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 788e4ede9bf4eb180e4b784268d98d657efbd9da Mon Sep 17 00:00:00 2001 From: Jai Vignesh J <108923524+Jaivignesh-afk@users.noreply.github.com> Date: Wed, 18 Oct 2023 04:20:57 +0530 Subject: [PATCH 164/306] Fix doctest power recursion (#10659) * Added doctests to power_using_recursion.py * Added doctest to power_using_recursion.py * Update power_using_recursion.py * Update power_using_recursion.py --------- Co-authored-by: Christian Clauss --- maths/power_using_recursion.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py index f82097f6d..e82635ba0 100644 --- a/maths/power_using_recursion.py +++ b/maths/power_using_recursion.py @@ -15,18 +15,35 @@ def power(base: int, exponent: int) -> float: """ - power(3, 4) + >>> power(3, 4) 81 >>> power(2, 0) 1 >>> all(power(base, exponent) == pow(base, exponent) ... for base in range(-10, 10) for exponent in range(10)) True + >>> power('a', 1) + 'a' + >>> power('a', 2) + Traceback (most recent call last): + ... + TypeError: can't multiply sequence by non-int of type 'str' + >>> power('a', 'b') + Traceback (most recent call last): + ... + TypeError: unsupported operand type(s) for -: 'str' and 'int' + >>> power(2, -1) + Traceback (most recent call last): + ... 
+ RecursionError: maximum recursion depth exceeded """ return base * power(base, (exponent - 1)) if exponent else 1 if __name__ == "__main__": + from doctests import testmod + + testmod() print("Raise base to the power of exponent using recursion...") base = int(input("Enter the base: ").strip()) exponent = int(input("Enter the exponent: ").strip()) From 361f64c21d7d2528828e20e2eedd59b8d69e5c18 Mon Sep 17 00:00:00 2001 From: Poojan Smart <44301271+PoojanSmart@users.noreply.github.com> Date: Wed, 18 Oct 2023 19:39:13 +0530 Subject: [PATCH 165/306] Adds hinge loss function algorithm (#10628) * Adds exponential moving average algorithm * code clean up * spell correction * Modifies I/O types of function * Replaces generator function * Resolved mypy type error * readability of code and documentation * Update exponential_moving_average.py * Adds hinge loss function * suggested doc and refactoring changes * refactoring --------- Co-authored-by: Christian Clauss --- machine_learning/loss_functions/hinge_loss.py | 64 +++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 machine_learning/loss_functions/hinge_loss.py diff --git a/machine_learning/loss_functions/hinge_loss.py b/machine_learning/loss_functions/hinge_loss.py new file mode 100644 index 000000000..5480a8cd6 --- /dev/null +++ b/machine_learning/loss_functions/hinge_loss.py @@ -0,0 +1,64 @@ +""" +Hinge Loss + +Description: +Compute the Hinge loss used for training SVM (Support Vector Machine). + +Formula: +loss = max(0, 1 - true * pred) + +Reference: https://en.wikipedia.org/wiki/Hinge_loss + +Author: Poojan Smart +Email: smrtpoojan@gmail.com +""" + +import numpy as np + + +def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the mean hinge loss for y_true and y_pred for binary classification. + + Args: + y_true: Array of actual values (ground truth) encoded as -1 and 1. + y_pred: Array of predicted values. + + Returns: + The hinge loss between y_true and y_pred. 
+ + Examples: + >>> y_true = np.array([-1, 1, 1, -1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(y_true, pred) + 1.52 + >>> y_true = np.array([-1, 1, 1, -1, 1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(y_true, pred) + Traceback (most recent call last): + ... + ValueError: Length of predicted and actual array must be same. + >>> y_true = np.array([-1, 1, 10, -1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(y_true, pred) + Traceback (most recent call last): + ... + ValueError: y_true can have values -1 or 1 only. + """ + + if len(y_true) != len(y_pred): + raise ValueError("Length of predicted and actual array must be same.") + + # Raise value error when y_true (encoded labels) have any other values + # than -1 and 1 + if np.any((y_true != -1) & (y_true != 1)): + raise ValueError("y_true can have values -1 or 1 only.") + + hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred)) + return np.mean(hinge_losses) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 572de4f15e266057e806a751006156a212a3812e Mon Sep 17 00:00:00 2001 From: Shivansh Bhatnagar Date: Wed, 18 Oct 2023 20:20:18 +0530 Subject: [PATCH 166/306] Added A General Swish Activation Function in Neural Networks (#10415) * Added A General Swish Activation Function in Neural Networks * Added the general swish function in the SiLU function and renamed it as swish.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Shivansh Bhatnagar Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../{sigmoid_linear_unit.py => swish.py} | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) rename neural_network/activation_functions/{sigmoid_linear_unit.py => swish.py} (72%) diff --git a/neural_network/activation_functions/sigmoid_linear_unit.py b/neural_network/activation_functions/swish.py similarity index 72% 
rename from neural_network/activation_functions/sigmoid_linear_unit.py rename to neural_network/activation_functions/swish.py index 0ee09bf82..ab3d8fa12 100644 --- a/neural_network/activation_functions/sigmoid_linear_unit.py +++ b/neural_network/activation_functions/swish.py @@ -12,6 +12,7 @@ image classification and machine translation. This script is inspired by a corresponding research paper. * https://arxiv.org/abs/1710.05941 +* https://blog.paperspace.com/swish-activation-function/ """ import numpy as np @@ -49,6 +50,25 @@ def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray: return vector * sigmoid(vector) +def swish(vector: np.ndarray, trainable_parameter: int) -> np.ndarray: + """ + Parameters: + vector (np.ndarray): A numpy array consisting of real values + trainable_parameter: Use to implement various Swish Activation Functions + + Returns: + swish_vec (np.ndarray): The input numpy array, after applying swish + + Examples: + >>> swish(np.array([-1.0, 1.0, 2.0]), 2) + array([-0.11920292, 0.88079708, 1.96402758]) + + >>> swish(np.array([-2]), 1) + array([-0.23840584]) + """ + return vector * sigmoid(trainable_parameter * vector) + + if __name__ == "__main__": import doctest From 9adb7ced16725e3f6cf24cf93ac81a8dcd351665 Mon Sep 17 00:00:00 2001 From: rtang09 <49603415+rtang09@users.noreply.github.com> Date: Thu, 19 Oct 2023 05:02:04 -0700 Subject: [PATCH 167/306] Update primelib.py (#10209) * Update primelib.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- maths/primelib.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/maths/primelib.py b/maths/primelib.py index 7e33844be..e2d432e18 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -51,6 +51,10 @@ def is_prime(number: int) -> bool: True >>> is_prime(10) False + >>> is_prime(97) + True + >>> 
is_prime(9991) + False >>> is_prime(-1) Traceback (most recent call last): ... From 30c8d5573a8b052210238487167a3ec2d7241d06 Mon Sep 17 00:00:00 2001 From: rtang09 <49603415+rtang09@users.noreply.github.com> Date: Thu, 19 Oct 2023 05:15:23 -0700 Subject: [PATCH 168/306] Update binary_exponentiation.py (#10253) * Update binary_exponentiation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/binary_exponentiation.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/maths/binary_exponentiation.py b/maths/binary_exponentiation.py index 05de939d1..7eeca8926 100644 --- a/maths/binary_exponentiation.py +++ b/maths/binary_exponentiation.py @@ -5,6 +5,12 @@ def binary_exponentiation(a: int, n: int) -> int: + """ + >>> binary_exponentiation(3, 5) + 243 + >>> binary_exponentiation(10, 3) + 1000 + """ if n == 0: return 1 @@ -17,6 +23,10 @@ def binary_exponentiation(a: int, n: int) -> int: if __name__ == "__main__": + import doctest + + doctest.testmod() + try: BASE = int(input("Enter Base : ").strip()) POWER = int(input("Enter Power : ").strip()) From b301e589e2c68f583bf3a09f6d4ca224175383b9 Mon Sep 17 00:00:00 2001 From: Iyiola Aloko <48067557+ialoko@users.noreply.github.com> Date: Thu, 19 Oct 2023 08:21:48 -0400 Subject: [PATCH 169/306] Update binary_exponentiation.py (#10342) Co-authored-by: Tianyi Zheng --- maths/binary_exponentiation.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/maths/binary_exponentiation.py b/maths/binary_exponentiation.py index 7eeca8926..f613767f5 100644 --- a/maths/binary_exponentiation.py +++ b/maths/binary_exponentiation.py @@ -6,10 +6,21 @@ def binary_exponentiation(a: int, n: int) -> int: """ + Compute a number raised by some quantity + >>> binary_exponentiation(-1, 3) + -1 + >>> binary_exponentiation(-1, 4) + 1 + >>> 
binary_exponentiation(2, 2) + 4 >>> binary_exponentiation(3, 5) 243 >>> binary_exponentiation(10, 3) 1000 + >>> binary_exponentiation(5e3, 1) + 5000.0 + >>> binary_exponentiation(-5e3, 1) + -5000.0 """ if n == 0: return 1 @@ -28,7 +39,7 @@ if __name__ == "__main__": doctest.testmod() try: - BASE = int(input("Enter Base : ").strip()) + BASE = int(float(input("Enter Base : ").strip())) POWER = int(input("Enter Power : ").strip()) except ValueError: print("Invalid literal for integer") From 33888646af9d74e46da0175df75b3e5892a72fc7 Mon Sep 17 00:00:00 2001 From: anshul-2010 <96651393+anshul-2010@users.noreply.github.com> Date: Thu, 19 Oct 2023 18:08:02 +0530 Subject: [PATCH 170/306] Edit Distance Algorithm for String Matching (#10571) * Edit Distance Algorithm for String Matching * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update edit_distance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- strings/edit_distance.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 strings/edit_distance.py diff --git a/strings/edit_distance.py b/strings/edit_distance.py new file mode 100644 index 000000000..e842c8555 --- /dev/null +++ b/strings/edit_distance.py @@ -0,0 +1,32 @@ +def edit_distance(source: str, target: str) -> int: + """ + Edit distance algorithm is a string metric, i.e., it is a way of quantifying how + dissimilar two strings are to one another. It is measured by counting the minimum + number of operations required to transform one string into another. 
+ + This implementation assumes that the cost of operations (insertion, deletion and + substitution) is always 1 + + Args: + source: the initial string with respect to which we are calculating the edit + distance for the target + target: the target string, formed after performing n operations on the source string + + >>> edit_distance("GATTIC", "GALTIC") + 1 + """ + if len(source) == 0: + return len(target) + elif len(target) == 0: + return len(source) + + delta = int(source[-1] != target[-1]) # Substitution + return min( + edit_distance(source[:-1], target[:-1]) + delta, + edit_distance(source, target[:-1]) + 1, + edit_distance(source[:-1], target) + 1, + ) + + +if __name__ == "__main__": + print(edit_distance("ATCGCTG", "TAGCTAA")) # Answer is 4 From 289a4dd6d35a3dd402c98db04d2f39cfc08ea1be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ab=C3=ADlio=20Azevedo?= Date: Thu, 19 Oct 2023 11:35:41 -0300 Subject: [PATCH 171/306] docs: add test scenarios to pull request template (#10396) * docs: add test scenarios to pull request template * Update .github/pull_request_template.md --------- Co-authored-by: Christian Clauss --- .github/pull_request_template.md | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 1f9797fae..e2ae0966c 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -4,6 +4,7 @@ * [ ] Add an algorithm? * [ ] Fix a bug or typo in an existing algorithm? +* [ ] Add or change doctests? -- Note: Please avoid changing both code and tests in a single pull request. * [ ] Documentation change? 
### Checklist: From bd3072b84512b33a6fd7d788812340daa8ac3465 Mon Sep 17 00:00:00 2001 From: Ankit Avinash <128812932+Void426@users.noreply.github.com> Date: Thu, 19 Oct 2023 20:16:39 +0530 Subject: [PATCH 172/306] Added Mean Squared Logarithmic Error (MSLE) Loss Function (#10637) * Added Mean Squared Logarithmic Error (MSLE) * Added Mean Squared Logarithmic Error (MSLE) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../mean_squared_logarithmic_error.py | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 machine_learning/loss_functions/mean_squared_logarithmic_error.py diff --git a/machine_learning/loss_functions/mean_squared_logarithmic_error.py b/machine_learning/loss_functions/mean_squared_logarithmic_error.py new file mode 100644 index 000000000..935ebff37 --- /dev/null +++ b/machine_learning/loss_functions/mean_squared_logarithmic_error.py @@ -0,0 +1,55 @@ +""" +Mean Squared Logarithmic Error (MSLE) Loss Function + +Description: +MSLE measures the mean squared logarithmic difference between +true values and predicted values, particularly useful when +dealing with regression problems involving skewed or large-value +targets. It is often used when the relative differences between +predicted and true values are more important than absolute +differences. + +Formula: +MSLE = (1/n) * Σ(log(1 + y_true) - log(1 + y_pred))^2 + +Source: +(https://insideaiml.com/blog/MeanSquared-Logarithmic-Error-Loss-1035) +""" + +import numpy as np + + +def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the Mean Squared Logarithmic Error (MSLE) between two arrays. + + Parameters: + - y_true: The true values (ground truth). + - y_pred: The predicted values. + + Returns: + - msle: The Mean Squared Logarithmic Error between y_true and y_pred. 
+ + Example usage: + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> mean_squared_logarithmic_error(true_values, predicted_values) + 0.0030860877925181344 + >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> mean_squared_logarithmic_error(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2 + return np.mean(squared_logarithmic_errors) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 69876140673881efefcb177e3ba2575b0c221438 Mon Sep 17 00:00:00 2001 From: ketan96-m <40893179+ketan96-m@users.noreply.github.com> Date: Thu, 19 Oct 2023 09:48:53 -0500 Subject: [PATCH 173/306] *added docstring and doctest for find_isolated_nodes (#10684) *added docstring and doctest for edglist *added docstring and doctest for adjm Co-authored-by: Ketan --- graphs/basic_graphs.py | 81 ++++++++++++++++++++++++++++++++++++++---- 1 file changed, 75 insertions(+), 6 deletions(-) diff --git a/graphs/basic_graphs.py b/graphs/basic_graphs.py index 065b6185c..25c8045b3 100644 --- a/graphs/basic_graphs.py +++ b/graphs/basic_graphs.py @@ -185,10 +185,29 @@ def topo(g, ind=None, q=None): def adjm(): - n = input().strip() + r""" + Reading an Adjacency matrix + + Parameters: + None + + Returns: + tuple: A tuple containing a list of edges and number of edges + + Example: + >>> # Simulate user input for 3 nodes + >>> input_data = "4\n0 1 0 1\n1 0 1 0\n0 1 0 1\n1 0 1 0\n" + >>> import sys,io + >>> original_input = sys.stdin + >>> sys.stdin = io.StringIO(input_data) # Redirect stdin for testing + >>> adjm() + ([(0, 1, 0, 1), (1, 0, 1, 0), (0, 1, 0, 1), (1, 0, 1, 0)], 4) + >>> sys.stdin = 
original_input # Restore original stdin + """ + n = int(input().strip()) a = [] for _ in range(n): - a.append(map(int, input().strip().split())) + a.append(tuple(map(int, input().strip().split()))) return a, n @@ -260,10 +279,29 @@ def prim(g, s): def edglist(): - n, m = map(int, input().split(" ")) + r""" + Get the edges and number of edges from the user + + Parameters: + None + + Returns: + tuple: A tuple containing a list of edges and number of edges + + Example: + >>> # Simulate user input for 3 edges and 4 vertices: (1, 2), (2, 3), (3, 4) + >>> input_data = "4 3\n1 2\n2 3\n3 4\n" + >>> import sys,io + >>> original_input = sys.stdin + >>> sys.stdin = io.StringIO(input_data) # Redirect stdin for testing + >>> edglist() + ([(1, 2), (2, 3), (3, 4)], 4) + >>> sys.stdin = original_input # Restore original stdin + """ + n, m = tuple(map(int, input().split(" "))) edges = [] for _ in range(m): - edges.append(map(int, input().split(" "))) + edges.append(tuple(map(int, input().split(" ")))) return edges, n @@ -278,7 +316,9 @@ def edglist(): def krusk(e_and_n): - # Sort edges on the basis of distance + """ + Sort edges on the basis of distance + """ (e, n) = e_and_n e.sort(reverse=True, key=lambda x: x[2]) s = [{i} for i in range(1, n + 1)] @@ -299,8 +339,37 @@ def krusk(e_and_n): break -# find the isolated node in the graph def find_isolated_nodes(graph): + """ + Find the isolated node in the graph + + Parameters: + graph (dict): A dictionary representing a graph. + + Returns: + list: A list of isolated nodes. 
+ + Examples: + >>> graph1 = {1: [2, 3], 2: [1, 3], 3: [1, 2], 4: []} + >>> find_isolated_nodes(graph1) + [4] + + >>> graph2 = {'A': ['B', 'C'], 'B': ['A'], 'C': ['A'], 'D': []} + >>> find_isolated_nodes(graph2) + ['D'] + + >>> graph3 = {'X': [], 'Y': [], 'Z': []} + >>> find_isolated_nodes(graph3) + ['X', 'Y', 'Z'] + + >>> graph4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]} + >>> find_isolated_nodes(graph4) + [] + + >>> graph5 = {} + >>> find_isolated_nodes(graph5) + [] + """ isolated = [] for node in graph: if not graph[node]: From 26ffad9d17232668d0630edb70167e5123a7f35c Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 19 Oct 2023 19:31:51 +0200 Subject: [PATCH 174/306] Simplify is_bst.py (#10627) * Simplify is_bst.py * updating DIRECTORY.md * Update is_bst.py * Rename is_bst.py to is_sorted.py * updating DIRECTORY.md * Update data_structures/binary_tree/is_sorted.py Co-authored-by: Tianyi Zheng --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- DIRECTORY.md | 3 +- data_structures/binary_tree/is_bst.py | 131 ----------------------- data_structures/binary_tree/is_sorted.py | 97 +++++++++++++++++ 3 files changed, 99 insertions(+), 132 deletions(-) delete mode 100644 data_structures/binary_tree/is_bst.py create mode 100644 data_structures/binary_tree/is_sorted.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d878f1c79..0999d2e86 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -199,7 +199,8 @@ * [Flatten Binarytree To Linkedlist](data_structures/binary_tree/flatten_binarytree_to_linkedlist.py) * [Floor And Ceiling](data_structures/binary_tree/floor_and_ceiling.py) * [Inorder Tree Traversal 2022](data_structures/binary_tree/inorder_tree_traversal_2022.py) - * [Is Bst](data_structures/binary_tree/is_bst.py) + * [Is Sorted](data_structures/binary_tree/is_sorted.py) + * [Is Sum Tree](data_structures/binary_tree/is_sum_tree.py) * [Lazy Segment Tree](data_structures/binary_tree/lazy_segment_tree.py) * 
[Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) diff --git a/data_structures/binary_tree/is_bst.py b/data_structures/binary_tree/is_bst.py deleted file mode 100644 index 0b2ef8c9f..000000000 --- a/data_structures/binary_tree/is_bst.py +++ /dev/null @@ -1,131 +0,0 @@ -""" -Author : Alexander Pantyukhin -Date : November 2, 2022 - -Task: -Given the root of a binary tree, determine if it is a valid binary search -tree (BST). - -A valid binary search tree is defined as follows: - -- The left subtree of a node contains only nodes with keys less than the node's key. -- The right subtree of a node contains only nodes with keys greater than the node's key. -- Both the left and right subtrees must also be binary search trees. - -Implementation notes: -Depth-first search approach. - -leetcode: https://leetcode.com/problems/validate-binary-search-tree/ - -Let n is the number of nodes in tree -Runtime: O(n) -Space: O(1) -""" - -from __future__ import annotations - -from dataclasses import dataclass - - -@dataclass -class TreeNode: - data: float - left: TreeNode | None = None - right: TreeNode | None = None - - -def is_binary_search_tree(root: TreeNode | None) -> bool: - """ - >>> is_binary_search_tree(TreeNode(data=2, - ... left=TreeNode(data=1), - ... right=TreeNode(data=3)) - ... ) - True - - >>> is_binary_search_tree(TreeNode(data=0, - ... left=TreeNode(data=-11), - ... right=TreeNode(data=3)) - ... ) - True - - >>> is_binary_search_tree(TreeNode(data=5, - ... left=TreeNode(data=1), - ... right=TreeNode(data=4, left=TreeNode(data=3))) - ... ) - False - - >>> is_binary_search_tree(TreeNode(data='a', - ... left=TreeNode(data=1), - ... right=TreeNode(data=4, left=TreeNode(data=3))) - ... ) - Traceback (most recent call last): - ... - ValueError: Each node should be type of TreeNode and data should be float. - - >>> is_binary_search_tree(TreeNode(data=2, - ... 
left=TreeNode([]), - ... right=TreeNode(data=4, left=TreeNode(data=3))) - ... ) - Traceback (most recent call last): - ... - ValueError: Each node should be type of TreeNode and data should be float. - """ - - # Validation - def is_valid_tree(node: TreeNode | None) -> bool: - """ - >>> is_valid_tree(None) - True - >>> is_valid_tree('abc') - False - >>> is_valid_tree(TreeNode(data='not a float')) - False - >>> is_valid_tree(TreeNode(data=1, left=TreeNode('123'))) - False - """ - if node is None: - return True - - if not isinstance(node, TreeNode): - return False - - try: - float(node.data) - except (TypeError, ValueError): - return False - - return is_valid_tree(node.left) and is_valid_tree(node.right) - - if not is_valid_tree(root): - raise ValueError( - "Each node should be type of TreeNode and data should be float." - ) - - def is_binary_search_tree_recursive_check( - node: TreeNode | None, left_bound: float, right_bound: float - ) -> bool: - """ - >>> is_binary_search_tree_recursive_check(None) - True - >>> is_binary_search_tree_recursive_check(TreeNode(data=1), 10, 20) - False - """ - - if node is None: - return True - - return ( - left_bound < node.data < right_bound - and is_binary_search_tree_recursive_check(node.left, left_bound, node.data) - and is_binary_search_tree_recursive_check( - node.right, node.data, right_bound - ) - ) - - return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf")) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/data_structures/binary_tree/is_sorted.py b/data_structures/binary_tree/is_sorted.py new file mode 100644 index 000000000..5876c5a9c --- /dev/null +++ b/data_structures/binary_tree/is_sorted.py @@ -0,0 +1,97 @@ +""" +Given the root of a binary tree, determine if it is a valid binary search tree (BST). + +A valid binary search tree is defined as follows: +- The left subtree of a node contains only nodes with keys less than the node's key. 
+- The right subtree of a node contains only nodes with keys greater than the node's key. +- Both the left and right subtrees must also be binary search trees. + +In effect, a binary tree is a valid BST if its nodes are sorted in ascending order. +leetcode: https://leetcode.com/problems/validate-binary-search-tree/ + +If n is the number of nodes in the tree then: +Runtime: O(n) +Space: O(1) +""" +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class Node: + data: float + left: Node | None = None + right: Node | None = None + + def __iter__(self) -> Iterator[float]: + """ + >>> root = Node(data=2.1) + >>> list(root) + [2.1] + >>> root.left=Node(data=2.0) + >>> list(root) + [2.0, 2.1] + >>> root.right=Node(data=2.2) + >>> list(root) + [2.0, 2.1, 2.2] + """ + if self.left: + yield from self.left + yield self.data + if self.right: + yield from self.right + + @property + def is_sorted(self) -> bool: + """ + >>> Node(data='abc').is_sorted + True + >>> Node(data=2, + ... left=Node(data=1.999), + ... right=Node(data=3)).is_sorted + True + >>> Node(data=0, + ... left=Node(data=0), + ... right=Node(data=0)).is_sorted + True + >>> Node(data=0, + ... left=Node(data=-11), + ... right=Node(data=3)).is_sorted + True + >>> Node(data=5, + ... left=Node(data=1), + ... right=Node(data=4, left=Node(data=3))).is_sorted + False + >>> Node(data='a', + ... left=Node(data=1), + ... right=Node(data=4, left=Node(data=3))).is_sorted + Traceback (most recent call last): + ... + TypeError: '<' not supported between instances of 'str' and 'int' + >>> Node(data=2, + ... left=Node([]), + ... right=Node(data=4, left=Node(data=3))).is_sorted + Traceback (most recent call last): + ... 
+ TypeError: '<' not supported between instances of 'int' and 'list' + """ + if self.left and (self.data < self.left.data or not self.left.is_sorted): + return False + if self.right and (self.data > self.right.data or not self.right.is_sorted): + return False + return True + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + tree = Node(data=2.1, left=Node(data=2.0), right=Node(data=2.2)) + print(f"Tree {list(tree)} is sorted: {tree.is_sorted = }.") + assert tree.right + tree.right.data = 2.0 + print(f"Tree {list(tree)} is sorted: {tree.is_sorted = }.") + tree.right.data = 2.1 + print(f"Tree {list(tree)} is sorted: {tree.is_sorted = }.") From be94690decde9f0e1df78b41d2a22e7e69bc176d Mon Sep 17 00:00:00 2001 From: NikhithaBandari <91549688+NikhithaBandari@users.noreply.github.com> Date: Thu, 19 Oct 2023 23:19:47 +0530 Subject: [PATCH 175/306] Create swap_all_odd_and_even_bits.py (#10692) * Create swap_all_odd_and_even_bits.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update swap_all_odd_and_even_bits.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * 6: 00000110 --> 9: 00001001 * Update swap_all_odd_and_even_bits.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../swap_all_odd_and_even_bits.py | 58 +++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 bit_manipulation/swap_all_odd_and_even_bits.py diff --git a/bit_manipulation/swap_all_odd_and_even_bits.py b/bit_manipulation/swap_all_odd_and_even_bits.py new file mode 100644 index 000000000..5ec84417b --- /dev/null +++ b/bit_manipulation/swap_all_odd_and_even_bits.py @@ -0,0 +1,58 @@ +def show_bits(before: int, after: int) -> str: + """ + >>> print(show_bits(0, 0xFFFF)) 
+ 0: 00000000 + 65535: 1111111111111111 + """ + return f"{before:>5}: {before:08b}\n{after:>5}: {after:08b}" + + +def swap_odd_even_bits(num: int) -> int: + """ + 1. We use bitwise AND operations to separate the even bits (0, 2, 4, 6, etc.) and + odd bits (1, 3, 5, 7, etc.) in the input number. + 2. We then right-shift the even bits by 1 position and left-shift the odd bits by + 1 position to swap them. + 3. Finally, we combine the swapped even and odd bits using a bitwise OR operation + to obtain the final result. + >>> print(show_bits(0, swap_odd_even_bits(0))) + 0: 00000000 + 0: 00000000 + >>> print(show_bits(1, swap_odd_even_bits(1))) + 1: 00000001 + 2: 00000010 + >>> print(show_bits(2, swap_odd_even_bits(2))) + 2: 00000010 + 1: 00000001 + >>> print(show_bits(3, swap_odd_even_bits(3))) + 3: 00000011 + 3: 00000011 + >>> print(show_bits(4, swap_odd_even_bits(4))) + 4: 00000100 + 8: 00001000 + >>> print(show_bits(5, swap_odd_even_bits(5))) + 5: 00000101 + 10: 00001010 + >>> print(show_bits(6, swap_odd_even_bits(6))) + 6: 00000110 + 9: 00001001 + >>> print(show_bits(23, swap_odd_even_bits(23))) + 23: 00010111 + 43: 00101011 + """ + # Get all even bits - 0xAAAAAAAA is a 32-bit number with all even bits set to 1 + even_bits = num & 0xAAAAAAAA + + # Get all odd bits - 0x55555555 is a 32-bit number with all odd bits set to 1 + odd_bits = num & 0x55555555 + + # Right shift even bits and left shift odd bits and swap them + return even_bits >> 1 | odd_bits << 1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + for i in (-1, 0, 1, 2, 3, 4, 23, 24): + print(show_bits(i, swap_odd_even_bits(i)), "\n") From 34f48b684bce39cb24667e5181b268c9f3bf9980 Mon Sep 17 00:00:00 2001 From: Anupamaraie <91787285+Anupamaraie@users.noreply.github.com> Date: Fri, 20 Oct 2023 01:50:16 +0545 Subject: [PATCH 176/306] Create vernam_cipher.py (#10702) * Create vernam_cipher.py added vernam cipher * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, 
see https://pre-commit.ci * Update vernam_cipher.py added return type * Update vernam_cipher.py added type hint for plaintext and key * Update vernam_cipher.py added tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py Added tests * Update vernam_cipher.py * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * Update vernam_cipher.py * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see 
https://pre-commit.ci * Update vernam_cipher.py * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update vernam_cipher.py * Update ciphers/vernam_cipher.py * Update vernam_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/vernam_cipher.py | 42 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 ciphers/vernam_cipher.py diff --git a/ciphers/vernam_cipher.py b/ciphers/vernam_cipher.py new file mode 100644 index 000000000..197f28635 --- /dev/null +++ b/ciphers/vernam_cipher.py @@ -0,0 +1,42 @@ +def vernam_encrypt(plaintext: str, key: str) -> str: + """ + >>> vernam_encrypt("HELLO","KEY") + 'RIJVS' + """ + ciphertext = "" + for i in range(len(plaintext)): + ct = ord(key[i % len(key)]) - 65 + ord(plaintext[i]) - 65 + while ct > 25: + ct = ct - 26 + ciphertext += chr(65 + ct) + return ciphertext + + +def vernam_decrypt(ciphertext: str, key: str) -> str: + """ + >>> vernam_decrypt("RIJVS","KEY") + 'HELLO' + """ + decrypted_text = "" + for i in range(len(ciphertext)): + ct = ord(ciphertext[i]) - ord(key[i % len(key)]) + while ct < 0: + ct = 26 + ct + decrypted_text += chr(65 + ct) + return decrypted_text + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + + # Example usage + plaintext = "HELLO" + key = "KEY" + encrypted_text = vernam_encrypt(plaintext, key) + decrypted_text = vernam_decrypt(encrypted_text, key) + print("\n\n") + print("Plaintext:", plaintext) + print("Encrypted:", encrypted_text) + print("Decrypted:", decrypted_text) From 
9875f374f4762d6219067b2e7909a762f25b68ba Mon Sep 17 00:00:00 2001 From: Adam Ross <14985050+R055A@users.noreply.github.com> Date: Thu, 19 Oct 2023 22:45:51 +0200 Subject: [PATCH 177/306] Consolidate bubble sort iterative and recursive (#10651) * Consolidate bubble sort iterative and recursive * Update sorts/bubble_sort.py Co-authored-by: Christian Clauss * Refactor bubble sort func signature, doctest, timer * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bubble_sort.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- sorts/bubble_sort.py | 109 ++++++++++++++++++++++++++++----- sorts/recursive_bubble_sort.py | 42 ------------- 2 files changed, 92 insertions(+), 59 deletions(-) delete mode 100644 sorts/recursive_bubble_sort.py diff --git a/sorts/bubble_sort.py b/sorts/bubble_sort.py index 7da4362a5..bdf85c70d 100644 --- a/sorts/bubble_sort.py +++ b/sorts/bubble_sort.py @@ -1,7 +1,7 @@ from typing import Any -def bubble_sort(collection: list[Any]) -> list[Any]: +def bubble_sort_iterative(collection: list[Any]) -> list[Any]: """Pure implementation of bubble sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous @@ -9,25 +9,37 @@ def bubble_sort(collection: list[Any]) -> list[Any]: :return: the same collection ordered by ascending Examples: - >>> bubble_sort([0, 5, 2, 3, 2]) + >>> bubble_sort_iterative([0, 5, 2, 3, 2]) [0, 2, 2, 3, 5] - >>> bubble_sort([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2]) + >>> bubble_sort_iterative([]) + [] + >>> bubble_sort_iterative([-2, -45, -5]) + [-45, -5, -2] + >>> bubble_sort_iterative([-23, 0, 6, -4, 34]) + [-23, -4, 0, 6, 34] + >>> bubble_sort_iterative([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2]) True - >>> bubble_sort([]) == sorted([]) + >>> bubble_sort_iterative([]) == sorted([]) True - >>> bubble_sort([-2, -45, -5]) == sorted([-2, -45, -5]) + 
>>> bubble_sort_iterative([-2, -45, -5]) == sorted([-2, -45, -5]) True - >>> bubble_sort([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34]) + >>> bubble_sort_iterative([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34]) True - >>> bubble_sort(['d', 'a', 'b', 'e', 'c']) == sorted(['d', 'a', 'b', 'e', 'c']) + >>> bubble_sort_iterative(['d', 'a', 'b', 'e']) == sorted(['d', 'a', 'b', 'e']) True + >>> bubble_sort_iterative(['z', 'a', 'y', 'b', 'x', 'c']) + ['a', 'b', 'c', 'x', 'y', 'z'] + >>> bubble_sort_iterative([1.1, 3.3, 5.5, 7.7, 2.2, 4.4, 6.6]) + [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7] + >>> bubble_sort_iterative([1, 3.3, 5, 7.7, 2, 4.4, 6]) + [1, 2, 3.3, 4.4, 5, 6, 7.7] >>> import random - >>> collection = random.sample(range(-50, 50), 100) - >>> bubble_sort(collection) == sorted(collection) + >>> collection_arg = random.sample(range(-50, 50), 100) + >>> bubble_sort_iterative(collection_arg) == sorted(collection_arg) True >>> import string - >>> collection = random.choices(string.ascii_letters + string.digits, k=100) - >>> bubble_sort(collection) == sorted(collection) + >>> collection_arg = random.choices(string.ascii_letters + string.digits, k=100) + >>> bubble_sort_iterative(collection_arg) == sorted(collection_arg) True """ length = len(collection) @@ -42,14 +54,77 @@ def bubble_sort(collection: list[Any]) -> list[Any]: return collection +def bubble_sort_recursive(collection: list[Any]) -> list[Any]: + """It is similar to the iterative bubble sort but recursive.
+ + :param collection: mutable ordered sequence of elements + :return: the same list in ascending order + + Examples: + >>> bubble_sort_recursive([0, 5, 2, 3, 2]) + [0, 2, 2, 3, 5] + >>> bubble_sort_iterative([]) + [] + >>> bubble_sort_recursive([-2, -45, -5]) + [-45, -5, -2] + >>> bubble_sort_recursive([-23, 0, 6, -4, 34]) + [-23, -4, 0, 6, 34] + >>> bubble_sort_recursive([0, 5, 2, 3, 2]) == sorted([0, 5, 2, 3, 2]) + True + >>> bubble_sort_recursive([]) == sorted([]) + True + >>> bubble_sort_recursive([-2, -45, -5]) == sorted([-2, -45, -5]) + True + >>> bubble_sort_recursive([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34]) + True + >>> bubble_sort_recursive(['d', 'a', 'b', 'e']) == sorted(['d', 'a', 'b', 'e']) + True + >>> bubble_sort_recursive(['z', 'a', 'y', 'b', 'x', 'c']) + ['a', 'b', 'c', 'x', 'y', 'z'] + >>> bubble_sort_recursive([1.1, 3.3, 5.5, 7.7, 2.2, 4.4, 6.6]) + [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7] + >>> bubble_sort_recursive([1, 3.3, 5, 7.7, 2, 4.4, 6]) + [1, 2, 3.3, 4.4, 5, 6, 7.7] + >>> import random + >>> collection_arg = random.sample(range(-50, 50), 100) + >>> bubble_sort_recursive(collection_arg) == sorted(collection_arg) + True + >>> import string + >>> collection_arg = random.choices(string.ascii_letters + string.digits, k=100) + >>> bubble_sort_recursive(collection_arg) == sorted(collection_arg) + True + """ + length = len(collection) + swapped = False + for i in range(length - 1): + if collection[i] > collection[i + 1]: + collection[i], collection[i + 1] = collection[i + 1], collection[i] + swapped = True + + return collection if not swapped else bubble_sort_recursive(collection) + + if __name__ == "__main__": import doctest - import time + from random import sample + from timeit import timeit doctest.testmod() - user_input = input("Enter numbers separated by a comma:").strip() - unsorted = [int(item) for item in user_input.split(",")] - start = time.process_time() - print(*bubble_sort(unsorted), sep=",") - print(f"Processing time: 
{(time.process_time() - start)%1e9 + 7}") + # Benchmark: Iterative seems slightly faster than recursive. + num_runs = 10_000 + unsorted = sample(range(-50, 50), 100) + timer_iterative = timeit( + "bubble_sort_iterative(unsorted[:])", globals=globals(), number=num_runs + ) + print("\nIterative bubble sort:") + print(*bubble_sort_iterative(unsorted), sep=",") + print(f"Processing time (iterative): {timer_iterative:.5f}s for {num_runs:,} runs") + + unsorted = sample(range(-50, 50), 100) + timer_recursive = timeit( + "bubble_sort_recursive(unsorted[:])", globals=globals(), number=num_runs + ) + print("\nRecursive bubble sort:") + print(*bubble_sort_recursive(unsorted), sep=",") + print(f"Processing time (recursive): {timer_recursive:.5f}s for {num_runs:,} runs") diff --git a/sorts/recursive_bubble_sort.py b/sorts/recursive_bubble_sort.py deleted file mode 100644 index 82af89593..000000000 --- a/sorts/recursive_bubble_sort.py +++ /dev/null @@ -1,42 +0,0 @@ -def bubble_sort(list_data: list, length: int = 0) -> list: - """ - It is similar is bubble sort but recursive. 
- :param list_data: mutable ordered sequence of elements - :param length: length of list data - :return: the same list in ascending order - - >>> bubble_sort([0, 5, 2, 3, 2], 5) - [0, 2, 2, 3, 5] - - >>> bubble_sort([], 0) - [] - - >>> bubble_sort([-2, -45, -5], 3) - [-45, -5, -2] - - >>> bubble_sort([-23, 0, 6, -4, 34], 5) - [-23, -4, 0, 6, 34] - - >>> bubble_sort([-23, 0, 6, -4, 34], 5) == sorted([-23, 0, 6, -4, 34]) - True - - >>> bubble_sort(['z','a','y','b','x','c'], 6) - ['a', 'b', 'c', 'x', 'y', 'z'] - - >>> bubble_sort([1.1, 3.3, 5.5, 7.7, 2.2, 4.4, 6.6]) - [1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7] - """ - length = length or len(list_data) - swapped = False - for i in range(length - 1): - if list_data[i] > list_data[i + 1]: - list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i] - swapped = True - - return list_data if not swapped else bubble_sort(list_data, length - 1) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 51805338afbbf76c3d1371b60ba301eaaf094359 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 19 Oct 2023 23:35:38 -0400 Subject: [PATCH 178/306] Fix ruff error in `machine_learning/sequential_minimum_optimization.py` (#10717) * updating DIRECTORY.md * Try to fix ruff error in sequential_minimum_optimization.py * Update sequential_minimum_optimization.py * Update sequential_minimum_optimization.py * Update sequential_minimum_optimization.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 13 +++++++++++-- machine_learning/sequential_minimum_optimization.py | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 0999d2e86..1aaabf782 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -37,6 +37,7 @@ ## Bit Manipulation * [Binary And Operator](bit_manipulation/binary_and_operator.py) + * [Binary Coded Decimal](bit_manipulation/binary_coded_decimal.py) * [Binary Count Setbits](bit_manipulation/binary_count_setbits.py) 
* [Binary Count Trailing Zeros](bit_manipulation/binary_count_trailing_zeros.py) * [Binary Or Operator](bit_manipulation/binary_or_operator.py) @@ -57,6 +58,7 @@ * [Power Of 4](bit_manipulation/power_of_4.py) * [Reverse Bits](bit_manipulation/reverse_bits.py) * [Single Bit Manipulation Operations](bit_manipulation/single_bit_manipulation_operations.py) + * [Swap All Odd And Even Bits](bit_manipulation/swap_all_odd_and_even_bits.py) ## Blockchain * [Diophantine Equation](blockchain/diophantine_equation.py) @@ -124,6 +126,7 @@ * [Transposition Cipher](ciphers/transposition_cipher.py) * [Transposition Cipher Encrypt Decrypt File](ciphers/transposition_cipher_encrypt_decrypt_file.py) * [Trifid Cipher](ciphers/trifid_cipher.py) + * [Vernam Cipher](ciphers/vernam_cipher.py) * [Vigenere Cipher](ciphers/vigenere_cipher.py) * [Xor Cipher](ciphers/xor_cipher.py) @@ -163,6 +166,7 @@ * [Molecular Chemistry](conversions/molecular_chemistry.py) * [Octal To Binary](conversions/octal_to_binary.py) * [Octal To Decimal](conversions/octal_to_decimal.py) + * [Octal To Hexadecimal](conversions/octal_to_hexadecimal.py) * [Prefix Conversions](conversions/prefix_conversions.py) * [Prefix Conversions String](conversions/prefix_conversions_string.py) * [Pressure Conversions](conversions/pressure_conversions.py) @@ -183,6 +187,7 @@ * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) * [Sparse Table](data_structures/arrays/sparse_table.py) + * [Sudoku Solver](data_structures/arrays/sudoku_solver.py) * Binary Tree * [Avl Tree](data_structures/binary_tree/avl_tree.py) * [Basic Binary Tree](data_structures/binary_tree/basic_binary_tree.py) @@ -548,8 +553,10 @@ * Loss Functions * [Binary Cross Entropy](machine_learning/loss_functions/binary_cross_entropy.py) * [Categorical Cross Entropy](machine_learning/loss_functions/categorical_cross_entropy.py) + * [Hinge Loss](machine_learning/loss_functions/hinge_loss.py) * [Huber 
Loss](machine_learning/loss_functions/huber_loss.py) * [Mean Squared Error](machine_learning/loss_functions/mean_squared_error.py) + * [Mean Squared Logarithmic Error](machine_learning/loss_functions/mean_squared_logarithmic_error.py) * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) @@ -734,6 +741,7 @@ * [Inverse Of Matrix](matrix/inverse_of_matrix.py) * [Largest Square Area In Matrix](matrix/largest_square_area_in_matrix.py) * [Matrix Class](matrix/matrix_class.py) + * [Matrix Multiplication Recursion](matrix/matrix_multiplication_recursion.py) * [Matrix Operation](matrix/matrix_operation.py) * [Max Area Of Island](matrix/max_area_of_island.py) * [Median Matrix](matrix/median_matrix.py) @@ -760,10 +768,10 @@ * [Mish](neural_network/activation_functions/mish.py) * [Rectified Linear Unit](neural_network/activation_functions/rectified_linear_unit.py) * [Scaled Exponential Linear Unit](neural_network/activation_functions/scaled_exponential_linear_unit.py) - * [Sigmoid Linear Unit](neural_network/activation_functions/sigmoid_linear_unit.py) * [Soboleva Modified Hyperbolic Tangent](neural_network/activation_functions/soboleva_modified_hyperbolic_tangent.py) * [Softplus](neural_network/activation_functions/softplus.py) * [Squareplus](neural_network/activation_functions/squareplus.py) + * [Swish](neural_network/activation_functions/swish.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Perceptron](neural_network/perceptron.py) @@ -1185,7 +1193,6 @@ * [Quick Sort](sorts/quick_sort.py) * [Quick Sort 3 Partition](sorts/quick_sort_3_partition.py) * [Radix Sort](sorts/radix_sort.py) - * [Recursive Bubble Sort](sorts/recursive_bubble_sort.py) * [Recursive Insertion Sort](sorts/recursive_insertion_sort.py) * 
[Recursive Mergesort Array](sorts/recursive_mergesort_array.py) * [Recursive Quick Sort](sorts/recursive_quick_sort.py) @@ -1216,12 +1223,14 @@ * [Damerau Levenshtein Distance](strings/damerau_levenshtein_distance.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) * [Dna](strings/dna.py) + * [Edit Distance](strings/edit_distance.py) * [Frequency Finder](strings/frequency_finder.py) * [Hamming Distance](strings/hamming_distance.py) * [Indian Phone Validator](strings/indian_phone_validator.py) * [Is Contains Unique Chars](strings/is_contains_unique_chars.py) * [Is Isogram](strings/is_isogram.py) * [Is Pangram](strings/is_pangram.py) + * [Is Polish National Id](strings/is_polish_national_id.py) * [Is Spain National Id](strings/is_spain_national_id.py) * [Is Srilankan Phone Number](strings/is_srilankan_phone_number.py) * [Is Valid Email Address](strings/is_valid_email_address.py) diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index b24f5669e..9e2304859 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -454,7 +454,7 @@ def test_cancel_data(): print("Hello!\nStart test svm by smo algorithm!") # 0: download dataset and load into pandas' dataframe if not os.path.exists(r"cancel_data.csv"): - request = urllib.request.Request( + request = urllib.request.Request( # noqa: S310 CANCER_DATASET_URL, headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, ) From 4154428351cd60db504eb232e3b7900987a2fa19 Mon Sep 17 00:00:00 2001 From: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> Date: Fri, 20 Oct 2023 09:59:24 +0530 Subject: [PATCH 179/306] [ADD]: Improved tests in power recursion! (#10664) * Added new tests! 
* [ADD]: Improved Tests * fixed * Removed spaces * Changed the file name * Added Changes * changed the code and kept the test cases * changed the code and kept the test cases * missed the line * removed spaces * Update power_using_recursion.py --------- Co-authored-by: Christian Clauss --- maths/power_using_recursion.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py index e82635ba0..462fc45bf 100644 --- a/maths/power_using_recursion.py +++ b/maths/power_using_recursion.py @@ -15,6 +15,8 @@ def power(base: int, exponent: int) -> float: """ + Calculate the power of a base raised to an exponent. + >>> power(3, 4) 81 >>> power(2, 0) From 82fc24ce96036b6e1180de06c513bbaacda6a550 Mon Sep 17 00:00:00 2001 From: RaymondDashWu <33266041+RaymondDashWu@users.noreply.github.com> Date: Thu, 19 Oct 2023 21:42:20 -0700 Subject: [PATCH 180/306] Test cases for check_bipartite_graph_bfs (#10688) * [ADD] tests for check_bipartite_graph_bfs * linter fix? * linter fix * [ADD] more test cases check_bipartite_graph_bfs * doctest fixes. Forgot to add 'Traceback...'
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * missed a Traceback * Update check_bipartite_graph_bfs.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- graphs/check_bipartite_graph_bfs.py | 45 +++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py index 7fc57cbc7..6c385d54e 100644 --- a/graphs/check_bipartite_graph_bfs.py +++ b/graphs/check_bipartite_graph_bfs.py @@ -10,6 +10,48 @@ from queue import Queue def check_bipartite(graph): + """ + >>> check_bipartite({}) + True + >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + True + >>> check_bipartite({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]}) + False + >>> check_bipartite({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]}) + True + >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + False + >>> check_bipartite({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + Traceback (most recent call last): + ... + KeyError: 0 + >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) + Traceback (most recent call last): + ... + KeyError: 4 + >>> check_bipartite({0: [-1, 3], 1: [0, -2]}) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> check_bipartite({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]}) + True + >>> check_bipartite({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + Traceback (most recent call last): + ... + KeyError: 0 + >>> check_bipartite({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) + Traceback (most recent call last): + ... + TypeError: list indices must be integers or slices, not float + >>> check_bipartite({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) + Traceback (most recent call last): + ... 
+ KeyError: 0 + >>> check_bipartite({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]}) + Traceback (most recent call last): + ... + TypeError: list indices must be integers or slices, not str + """ queue = Queue() visited = [False] * len(graph) color = [-1] * len(graph) @@ -45,3 +87,6 @@ def check_bipartite(graph): if __name__ == "__main__": # Adjacency List of graph print(check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})) + import doctest + + doctest.testmod() From 197604898b85e84cfbaee0a0dd06095db8d1c7b6 Mon Sep 17 00:00:00 2001 From: shivaparihar6119 <122152343+shivaparihar6119@users.noreply.github.com> Date: Fri, 20 Oct 2023 11:39:58 +0530 Subject: [PATCH 181/306] Concatenates both check bipatrite graphs(bfs&dfs) (#10708) * sync * fixes#8098 * deleted: graphs/check_bipartite_graph_all.py new file: graphs/check_bipatrite,py * renamed: graphs/check_bipatrite,py -> graphs/check_bipatrite.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add the new tests --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 10 -- graphs/check_bipartite_graph_bfs.py | 92 -------------- graphs/check_bipartite_graph_dfs.py | 55 --------- graphs/check_bipatrite.py | 179 ++++++++++++++++++++++++++++ 4 files changed, 179 insertions(+), 157 deletions(-) delete mode 100644 graphs/check_bipartite_graph_bfs.py delete mode 100644 graphs/check_bipartite_graph_dfs.py create mode 100644 graphs/check_bipatrite.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 1aaabf782..1320c70ef 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -65,9 +65,7 @@ ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) - * [Imply Gate](boolean_algebra/imply_gate.py) * [Nand Gate](boolean_algebra/nand_gate.py) - * [Nimply Gate](boolean_algebra/nimply_gate.py) * [Nor Gate](boolean_algebra/nor_gate.py) * [Not 
Gate](boolean_algebra/not_gate.py) * [Or Gate](boolean_algebra/or_gate.py) @@ -180,9 +178,7 @@ ## Data Structures * Arrays * [Equilibrium Index In Array](data_structures/arrays/equilibrium_index_in_array.py) - * [Find Triplets With 0 Sum](data_structures/arrays/find_triplets_with_0_sum.py) * [Median Two Array](data_structures/arrays/median_two_array.py) - * [Pairs With Given Sum](data_structures/arrays/pairs_with_given_sum.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) @@ -402,7 +398,6 @@ ## Financial * [Equated Monthly Installments](financial/equated_monthly_installments.py) - * [Exponential Moving Average](financial/exponential_moving_average.py) * [Interest](financial/interest.py) * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) @@ -711,7 +706,6 @@ * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) - * [Solovay Strassen Primality Test](maths/solovay_strassen_primality_test.py) * [Square Root](maths/square_root.py) * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) * [Sum Of Digits](maths/sum_of_digits.py) @@ -753,7 +747,6 @@ * [Spiral Print](matrix/spiral_print.py) * Tests * [Test Matrix Operation](matrix/tests/test_matrix_operation.py) - * [Validate Sudoku Board](matrix/validate_sudoku_board.py) ## Networking Flow * [Ford Fulkerson](networking_flow/ford_fulkerson.py) @@ -829,7 +822,6 @@ * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) * [Speed Of Sound](physics/speed_of_sound.py) - * [Speeds Of Gas Molecules](physics/speeds_of_gas_molecules.py) ## Project Euler * Problem 001 @@ -1220,7 +1212,6 @@ * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) * [Credit Card Validator](strings/credit_card_validator.py) - * [Damerau Levenshtein 
Distance](strings/damerau_levenshtein_distance.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) * [Dna](strings/dna.py) * [Edit Distance](strings/edit_distance.py) @@ -1255,7 +1246,6 @@ * [String Switch Case](strings/string_switch_case.py) * [Strip](strings/strip.py) * [Text Justification](strings/text_justification.py) - * [Title](strings/title.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) diff --git a/graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py deleted file mode 100644 index 6c385d54e..000000000 --- a/graphs/check_bipartite_graph_bfs.py +++ /dev/null @@ -1,92 +0,0 @@ -# Check whether Graph is Bipartite or Not using BFS - - -# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, -# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex -# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, -# or u belongs to V and v to U. We can also say that there is no edge that connects -# vertices of same set. -from queue import Queue - - -def check_bipartite(graph): - """ - >>> check_bipartite({}) - True - >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) - True - >>> check_bipartite({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]}) - False - >>> check_bipartite({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]}) - True - >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) - False - >>> check_bipartite({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) - Traceback (most recent call last): - ... - KeyError: 0 - >>> check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) - Traceback (most recent call last): - ... - KeyError: 4 - >>> check_bipartite({0: [-1, 3], 1: [0, -2]}) - Traceback (most recent call last): - ... 
- IndexError: list index out of range - >>> check_bipartite({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]}) - True - >>> check_bipartite({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) - Traceback (most recent call last): - ... - KeyError: 0 - >>> check_bipartite({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) - Traceback (most recent call last): - ... - TypeError: list indices must be integers or slices, not float - >>> check_bipartite({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) - Traceback (most recent call last): - ... - KeyError: 0 - >>> check_bipartite({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]}) - Traceback (most recent call last): - ... - TypeError: list indices must be integers or slices, not str - """ - queue = Queue() - visited = [False] * len(graph) - color = [-1] * len(graph) - - def bfs(): - while not queue.empty(): - u = queue.get() - visited[u] = True - - for neighbour in graph[u]: - if neighbour == u: - return False - - if color[neighbour] == -1: - color[neighbour] = 1 - color[u] - queue.put(neighbour) - - elif color[neighbour] == color[u]: - return False - - return True - - for i in range(len(graph)): - if not visited[i]: - queue.put(i) - color[i] = 0 - if bfs() is False: - return False - - return True - - -if __name__ == "__main__": - # Adjacency List of graph - print(check_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]})) - import doctest - - doctest.testmod() diff --git a/graphs/check_bipartite_graph_dfs.py b/graphs/check_bipartite_graph_dfs.py deleted file mode 100644 index b13a9eb95..000000000 --- a/graphs/check_bipartite_graph_dfs.py +++ /dev/null @@ -1,55 +0,0 @@ -from collections import defaultdict - - -def is_bipartite(graph: defaultdict[int, list[int]]) -> bool: - """ - Check whether a graph is Bipartite or not using Depth-First Search (DFS). 
- - A Bipartite Graph is a graph whose vertices can be divided into two independent - sets, U and V such that every edge (u, v) either connects a vertex from - U to V or a vertex from V to U. In other words, for every edge (u, v), - either u belongs to U and v to V, or u belongs to V and v to U. There is - no edge that connects vertices of the same set. - - Args: - graph: An adjacency list representing the graph. - - Returns: - True if there's no edge that connects vertices of the same set, False otherwise. - - Examples: - >>> is_bipartite( - ... defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4], 3: [1], 4: [2]}) - ... ) - False - >>> is_bipartite(defaultdict(list, {0: [1, 2], 1: [0, 2], 2: [0, 1]})) - True - """ - - def depth_first_search(node: int, color: int) -> bool: - visited[node] = color - return any( - visited[neighbour] == color - or ( - visited[neighbour] == -1 - and not depth_first_search(neighbour, 1 - color) - ) - for neighbour in graph[node] - ) - - visited: defaultdict[int, int] = defaultdict(lambda: -1) - - return all( - not (visited[node] == -1 and not depth_first_search(node, 0)) for node in graph - ) - - -if __name__ == "__main__": - import doctest - - result = doctest.testmod() - - if result.failed: - print(f"{result.failed} test(s) failed.") - else: - print("All tests passed!") diff --git a/graphs/check_bipatrite.py b/graphs/check_bipatrite.py new file mode 100644 index 000000000..10b9cc965 --- /dev/null +++ b/graphs/check_bipatrite.py @@ -0,0 +1,179 @@ +from collections import defaultdict, deque + + +def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool: + """ + Check if a graph is bipartite using depth-first search (DFS). + + Args: + graph: Adjacency list representing the graph. + + Returns: + True if bipartite, False otherwise. + + Checks if the graph can be divided into two sets of vertices, such that no two + vertices within the same set are connected by an edge. + + Examples: + # FIXME: This test should pass. 
+ >>> is_bipartite_dfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4]})) + Traceback (most recent call last): + ... + RuntimeError: dictionary changed size during iteration + >>> is_bipartite_dfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 1]})) + False + >>> is_bipartite_dfs({}) + True + >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + True + >>> is_bipartite_dfs({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]}) + False + >>> is_bipartite_dfs({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]}) + True + >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + False + >>> is_bipartite_dfs({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + Traceback (most recent call last): + ... + KeyError: 0 + + # FIXME: This test should fails with KeyError: 4. + >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) + False + >>> is_bipartite_dfs({0: [-1, 3], 1: [0, -2]}) + Traceback (most recent call last): + ... + KeyError: -1 + >>> is_bipartite_dfs({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]}) + True + >>> is_bipartite_dfs({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + Traceback (most recent call last): + ... + KeyError: 0 + + # FIXME: This test should fails with TypeError: list indices must be integers or... + >>> is_bipartite_dfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) + True + >>> is_bipartite_dfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) + Traceback (most recent call last): + ... + KeyError: 1 + >>> is_bipartite_dfs({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]}) + Traceback (most recent call last): + ... + KeyError: 'b' + """ + + def depth_first_search(node: int, color: int) -> bool: + """ + Perform Depth-First Search (DFS) on the graph starting from a node. + + Args: + node: The current node being visited. + color: The color assigned to the current node. 
+ + Returns: + True if the graph is bipartite starting from the current node, + False otherwise. + """ + if visited[node] == -1: + visited[node] = color + for neighbor in graph[node]: + if not depth_first_search(neighbor, 1 - color): + return False + return visited[node] == color + + visited: defaultdict[int, int] = defaultdict(lambda: -1) + for node in graph: + if visited[node] == -1 and not depth_first_search(node, 0): + return False + return True + + +def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: + """ + Check if a graph is bipartite using a breadth-first search (BFS). + + Args: + graph: Adjacency list representing the graph. + + Returns: + True if bipartite, False otherwise. + + Check if the graph can be divided into two sets of vertices, such that no two + vertices within the same set are connected by an edge. + + Examples: + # FIXME: This test should pass. + >>> is_bipartite_bfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4]})) + Traceback (most recent call last): + ... + RuntimeError: dictionary changed size during iteration + >>> is_bipartite_bfs(defaultdict(list, {0: [1, 2], 1: [0, 2], 2: [0, 1]})) + False + >>> is_bipartite_bfs({}) + True + >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + True + >>> is_bipartite_bfs({0: [1, 2, 3], 1: [0, 2], 2: [0, 1, 3], 3: [0, 2]}) + False + >>> is_bipartite_bfs({0: [4], 1: [], 2: [4], 3: [4], 4: [0, 2, 3]}) + True + >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + False + >>> is_bipartite_bfs({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) + Traceback (most recent call last): + ... + KeyError: 0 + + # FIXME: This test should fails with KeyError: 4. + >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) + False + >>> is_bipartite_bfs({0: [-1, 3], 1: [0, -2]}) + Traceback (most recent call last): + ... 
+ KeyError: -1 + >>> is_bipartite_bfs({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]}) + True + >>> is_bipartite_bfs({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) + Traceback (most recent call last): + ... + KeyError: 0 + + # FIXME: This test should fails with TypeError: list indices must be integers or... + >>> is_bipartite_bfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) + True + >>> is_bipartite_bfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) + Traceback (most recent call last): + ... + KeyError: 1 + >>> is_bipartite_bfs({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]}) + Traceback (most recent call last): + ... + KeyError: 'b' + """ + visited: defaultdict[int, int] = defaultdict(lambda: -1) + for node in graph: + if visited[node] == -1: + queue: deque[int] = deque() + queue.append(node) + visited[node] = 0 + while queue: + curr_node = queue.popleft() + for neighbor in graph[curr_node]: + if visited[neighbor] == -1: + visited[neighbor] = 1 - visited[curr_node] + queue.append(neighbor) + elif visited[neighbor] == visited[curr_node]: + return False + return True + + +if __name__ == "__main": + import doctest + + result = doctest.testmod() + if result.failed: + print(f"{result.failed} test(s) failed.") + else: + print("All tests passed!") From 6f2d6f72d56f832dcfaaf226688c1dab4cdb9d0e Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 20 Oct 2023 02:17:31 -0400 Subject: [PATCH 182/306] Move files for special numbers to own directory (#10714) --- .../armstrong_numbers.py | 196 +++++------ .../automorphic_number.py | 0 maths/{ => special_numbers}/bell_numbers.py | 0 .../carmichael_number.py | 0 maths/{ => special_numbers}/catalan_number.py | 0 .../{ => special_numbers}/hamming_numbers.py | 0 .../{ => special_numbers}/harshad_numbers.py | 316 +++++++++--------- .../{ => special_numbers}/hexagonal_number.py | 0 .../krishnamurthy_number.py | 0 maths/{ => special_numbers}/perfect_number.py | 0 .../polygonal_numbers.py | 0 
maths/{ => special_numbers}/pronic_number.py | 0 maths/{ => special_numbers}/proth_number.py | 0 maths/{ => special_numbers}/ugly_numbers.py | 108 +++--- maths/{ => special_numbers}/weird_number.py | 0 15 files changed, 310 insertions(+), 310 deletions(-) rename maths/{ => special_numbers}/armstrong_numbers.py (96%) rename maths/{ => special_numbers}/automorphic_number.py (100%) rename maths/{ => special_numbers}/bell_numbers.py (100%) rename maths/{ => special_numbers}/carmichael_number.py (100%) rename maths/{ => special_numbers}/catalan_number.py (100%) rename maths/{ => special_numbers}/hamming_numbers.py (100%) rename maths/{ => special_numbers}/harshad_numbers.py (96%) rename maths/{ => special_numbers}/hexagonal_number.py (100%) rename maths/{ => special_numbers}/krishnamurthy_number.py (100%) rename maths/{ => special_numbers}/perfect_number.py (100%) rename maths/{ => special_numbers}/polygonal_numbers.py (100%) rename maths/{ => special_numbers}/pronic_number.py (100%) rename maths/{ => special_numbers}/proth_number.py (100%) rename maths/{ => special_numbers}/ugly_numbers.py (96%) rename maths/{ => special_numbers}/weird_number.py (100%) diff --git a/maths/armstrong_numbers.py b/maths/special_numbers/armstrong_numbers.py similarity index 96% rename from maths/armstrong_numbers.py rename to maths/special_numbers/armstrong_numbers.py index e1c25d467..b037aacb1 100644 --- a/maths/armstrong_numbers.py +++ b/maths/special_numbers/armstrong_numbers.py @@ -1,98 +1,98 @@ -""" -An Armstrong number is equal to the sum of its own digits each raised to the -power of the number of digits. - -For example, 370 is an Armstrong number because 3*3*3 + 7*7*7 + 0*0*0 = 370. - -Armstrong numbers are also called Narcissistic numbers and Pluperfect numbers. 
- -On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188 -""" -PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401) -FAILING: tuple = (-153, -1, 0, 1.2, 200, "A", [], {}, None) - - -def armstrong_number(n: int) -> bool: - """ - Return True if n is an Armstrong number or False if it is not. - - >>> all(armstrong_number(n) for n in PASSING) - True - >>> any(armstrong_number(n) for n in FAILING) - False - """ - if not isinstance(n, int) or n < 1: - return False - - # Initialization of sum and number of digits. - total = 0 - number_of_digits = 0 - temp = n - # Calculation of digits of the number - number_of_digits = len(str(n)) - # Dividing number into separate digits and find Armstrong number - temp = n - while temp > 0: - rem = temp % 10 - total += rem**number_of_digits - temp //= 10 - return n == total - - -def pluperfect_number(n: int) -> bool: - """Return True if n is a pluperfect number or False if it is not - - >>> all(armstrong_number(n) for n in PASSING) - True - >>> any(armstrong_number(n) for n in FAILING) - False - """ - if not isinstance(n, int) or n < 1: - return False - - # Init a "histogram" of the digits - digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - digit_total = 0 - total = 0 - temp = n - while temp > 0: - temp, rem = divmod(temp, 10) - digit_histogram[rem] += 1 - digit_total += 1 - - for cnt, i in zip(digit_histogram, range(len(digit_histogram))): - total += cnt * i**digit_total - - return n == total - - -def narcissistic_number(n: int) -> bool: - """Return True if n is a narcissistic number or False if it is not. 
- - >>> all(armstrong_number(n) for n in PASSING) - True - >>> any(armstrong_number(n) for n in FAILING) - False - """ - if not isinstance(n, int) or n < 1: - return False - expo = len(str(n)) # the power that all digits will be raised to - # check if sum of each digit multiplied expo times is equal to number - return n == sum(int(i) ** expo for i in str(n)) - - -def main(): - """ - Request that user input an integer and tell them if it is Armstrong number. - """ - num = int(input("Enter an integer to see if it is an Armstrong number: ").strip()) - print(f"{num} is {'' if armstrong_number(num) else 'not '}an Armstrong number.") - print(f"{num} is {'' if narcissistic_number(num) else 'not '}an Armstrong number.") - print(f"{num} is {'' if pluperfect_number(num) else 'not '}an Armstrong number.") - - -if __name__ == "__main__": - import doctest - - doctest.testmod() - main() +""" +An Armstrong number is equal to the sum of its own digits each raised to the +power of the number of digits. + +For example, 370 is an Armstrong number because 3*3*3 + 7*7*7 + 0*0*0 = 370. + +Armstrong numbers are also called Narcissistic numbers and Pluperfect numbers. + +On-Line Encyclopedia of Integer Sequences entry: https://oeis.org/A005188 +""" +PASSING = (1, 153, 370, 371, 1634, 24678051, 115132219018763992565095597973971522401) +FAILING: tuple = (-153, -1, 0, 1.2, 200, "A", [], {}, None) + + +def armstrong_number(n: int) -> bool: + """ + Return True if n is an Armstrong number or False if it is not. + + >>> all(armstrong_number(n) for n in PASSING) + True + >>> any(armstrong_number(n) for n in FAILING) + False + """ + if not isinstance(n, int) or n < 1: + return False + + # Initialization of sum and number of digits. 
+ total = 0 + number_of_digits = 0 + temp = n + # Calculation of digits of the number + number_of_digits = len(str(n)) + # Dividing number into separate digits and find Armstrong number + temp = n + while temp > 0: + rem = temp % 10 + total += rem**number_of_digits + temp //= 10 + return n == total + + +def pluperfect_number(n: int) -> bool: + """Return True if n is a pluperfect number or False if it is not + + >>> all(armstrong_number(n) for n in PASSING) + True + >>> any(armstrong_number(n) for n in FAILING) + False + """ + if not isinstance(n, int) or n < 1: + return False + + # Init a "histogram" of the digits + digit_histogram = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + digit_total = 0 + total = 0 + temp = n + while temp > 0: + temp, rem = divmod(temp, 10) + digit_histogram[rem] += 1 + digit_total += 1 + + for cnt, i in zip(digit_histogram, range(len(digit_histogram))): + total += cnt * i**digit_total + + return n == total + + +def narcissistic_number(n: int) -> bool: + """Return True if n is a narcissistic number or False if it is not. + + >>> all(armstrong_number(n) for n in PASSING) + True + >>> any(armstrong_number(n) for n in FAILING) + False + """ + if not isinstance(n, int) or n < 1: + return False + expo = len(str(n)) # the power that all digits will be raised to + # check if sum of each digit multiplied expo times is equal to number + return n == sum(int(i) ** expo for i in str(n)) + + +def main(): + """ + Request that user input an integer and tell them if it is Armstrong number. 
+ """ + num = int(input("Enter an integer to see if it is an Armstrong number: ").strip()) + print(f"{num} is {'' if armstrong_number(num) else 'not '}an Armstrong number.") + print(f"{num} is {'' if narcissistic_number(num) else 'not '}an Armstrong number.") + print(f"{num} is {'' if pluperfect_number(num) else 'not '}an Armstrong number.") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() diff --git a/maths/automorphic_number.py b/maths/special_numbers/automorphic_number.py similarity index 100% rename from maths/automorphic_number.py rename to maths/special_numbers/automorphic_number.py diff --git a/maths/bell_numbers.py b/maths/special_numbers/bell_numbers.py similarity index 100% rename from maths/bell_numbers.py rename to maths/special_numbers/bell_numbers.py diff --git a/maths/carmichael_number.py b/maths/special_numbers/carmichael_number.py similarity index 100% rename from maths/carmichael_number.py rename to maths/special_numbers/carmichael_number.py diff --git a/maths/catalan_number.py b/maths/special_numbers/catalan_number.py similarity index 100% rename from maths/catalan_number.py rename to maths/special_numbers/catalan_number.py diff --git a/maths/hamming_numbers.py b/maths/special_numbers/hamming_numbers.py similarity index 100% rename from maths/hamming_numbers.py rename to maths/special_numbers/hamming_numbers.py diff --git a/maths/harshad_numbers.py b/maths/special_numbers/harshad_numbers.py similarity index 96% rename from maths/harshad_numbers.py rename to maths/special_numbers/harshad_numbers.py index 050c69e0b..61667adfa 100644 --- a/maths/harshad_numbers.py +++ b/maths/special_numbers/harshad_numbers.py @@ -1,158 +1,158 @@ -""" -A harshad number (or more specifically an n-harshad number) is a number that's -divisible by the sum of its digits in some given base n. 
-Reference: https://en.wikipedia.org/wiki/Harshad_number -""" - - -def int_to_base(number: int, base: int) -> str: - """ - Convert a given positive decimal integer to base 'base'. - Where 'base' ranges from 2 to 36. - - Examples: - >>> int_to_base(23, 2) - '10111' - >>> int_to_base(58, 5) - '213' - >>> int_to_base(167, 16) - 'A7' - >>> # bases below 2 and beyond 36 will error - >>> int_to_base(98, 1) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - >>> int_to_base(98, 37) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - """ - - if base < 2 or base > 36: - raise ValueError("'base' must be between 2 and 36 inclusive") - - digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - result = "" - - if number < 0: - raise ValueError("number must be a positive integer") - - while number > 0: - number, remainder = divmod(number, base) - result = digits[remainder] + result - - if result == "": - result = "0" - - return result - - -def sum_of_digits(num: int, base: int) -> str: - """ - Calculate the sum of digit values in a positive integer - converted to the given 'base'. - Where 'base' ranges from 2 to 36. - - Examples: - >>> sum_of_digits(103, 12) - '13' - >>> sum_of_digits(1275, 4) - '30' - >>> sum_of_digits(6645, 2) - '1001' - >>> # bases below 2 and beyond 36 will error - >>> sum_of_digits(543, 1) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - >>> sum_of_digits(543, 37) - Traceback (most recent call last): - ... 
- ValueError: 'base' must be between 2 and 36 inclusive - """ - - if base < 2 or base > 36: - raise ValueError("'base' must be between 2 and 36 inclusive") - - num_str = int_to_base(num, base) - res = sum(int(char, base) for char in num_str) - res_str = int_to_base(res, base) - return res_str - - -def harshad_numbers_in_base(limit: int, base: int) -> list[str]: - """ - Finds all Harshad numbers smaller than num in base 'base'. - Where 'base' ranges from 2 to 36. - - Examples: - >>> harshad_numbers_in_base(15, 2) - ['1', '10', '100', '110', '1000', '1010', '1100'] - >>> harshad_numbers_in_base(12, 34) - ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B'] - >>> harshad_numbers_in_base(12, 4) - ['1', '2', '3', '10', '12', '20', '21'] - >>> # bases below 2 and beyond 36 will error - >>> harshad_numbers_in_base(234, 37) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - >>> harshad_numbers_in_base(234, 1) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - """ - - if base < 2 or base > 36: - raise ValueError("'base' must be between 2 and 36 inclusive") - - if limit < 0: - return [] - - numbers = [ - int_to_base(i, base) - for i in range(1, limit) - if i % int(sum_of_digits(i, base), base) == 0 - ] - - return numbers - - -def is_harshad_number_in_base(num: int, base: int) -> bool: - """ - Determines whether n in base 'base' is a harshad number. - Where 'base' ranges from 2 to 36. - - Examples: - >>> is_harshad_number_in_base(18, 10) - True - >>> is_harshad_number_in_base(21, 10) - True - >>> is_harshad_number_in_base(-21, 5) - False - >>> # bases below 2 and beyond 36 will error - >>> is_harshad_number_in_base(45, 37) - Traceback (most recent call last): - ... - ValueError: 'base' must be between 2 and 36 inclusive - >>> is_harshad_number_in_base(45, 1) - Traceback (most recent call last): - ... 
- ValueError: 'base' must be between 2 and 36 inclusive - """ - - if base < 2 or base > 36: - raise ValueError("'base' must be between 2 and 36 inclusive") - - if num < 0: - return False - - n = int_to_base(num, base) - d = sum_of_digits(num, base) - return int(n, base) % int(d, base) == 0 - - -if __name__ == "__main__": - import doctest - - doctest.testmod() +""" +A harshad number (or more specifically an n-harshad number) is a number that's +divisible by the sum of its digits in some given base n. +Reference: https://en.wikipedia.org/wiki/Harshad_number +""" + + +def int_to_base(number: int, base: int) -> str: + """ + Convert a given positive decimal integer to base 'base'. + Where 'base' ranges from 2 to 36. + + Examples: + >>> int_to_base(23, 2) + '10111' + >>> int_to_base(58, 5) + '213' + >>> int_to_base(167, 16) + 'A7' + >>> # bases below 2 and beyond 36 will error + >>> int_to_base(98, 1) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + >>> int_to_base(98, 37) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + result = "" + + if number < 0: + raise ValueError("number must be a positive integer") + + while number > 0: + number, remainder = divmod(number, base) + result = digits[remainder] + result + + if result == "": + result = "0" + + return result + + +def sum_of_digits(num: int, base: int) -> str: + """ + Calculate the sum of digit values in a positive integer + converted to the given 'base'. + Where 'base' ranges from 2 to 36. + + Examples: + >>> sum_of_digits(103, 12) + '13' + >>> sum_of_digits(1275, 4) + '30' + >>> sum_of_digits(6645, 2) + '1001' + >>> # bases below 2 and beyond 36 will error + >>> sum_of_digits(543, 1) + Traceback (most recent call last): + ... 
+ ValueError: 'base' must be between 2 and 36 inclusive + >>> sum_of_digits(543, 37) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + num_str = int_to_base(num, base) + res = sum(int(char, base) for char in num_str) + res_str = int_to_base(res, base) + return res_str + + +def harshad_numbers_in_base(limit: int, base: int) -> list[str]: + """ + Finds all Harshad numbers smaller than num in base 'base'. + Where 'base' ranges from 2 to 36. + + Examples: + >>> harshad_numbers_in_base(15, 2) + ['1', '10', '100', '110', '1000', '1010', '1100'] + >>> harshad_numbers_in_base(12, 34) + ['1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B'] + >>> harshad_numbers_in_base(12, 4) + ['1', '2', '3', '10', '12', '20', '21'] + >>> # bases below 2 and beyond 36 will error + >>> harshad_numbers_in_base(234, 37) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + >>> harshad_numbers_in_base(234, 1) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + if limit < 0: + return [] + + numbers = [ + int_to_base(i, base) + for i in range(1, limit) + if i % int(sum_of_digits(i, base), base) == 0 + ] + + return numbers + + +def is_harshad_number_in_base(num: int, base: int) -> bool: + """ + Determines whether n in base 'base' is a harshad number. + Where 'base' ranges from 2 to 36. + + Examples: + >>> is_harshad_number_in_base(18, 10) + True + >>> is_harshad_number_in_base(21, 10) + True + >>> is_harshad_number_in_base(-21, 5) + False + >>> # bases below 2 and beyond 36 will error + >>> is_harshad_number_in_base(45, 37) + Traceback (most recent call last): + ... 
+ ValueError: 'base' must be between 2 and 36 inclusive + >>> is_harshad_number_in_base(45, 1) + Traceback (most recent call last): + ... + ValueError: 'base' must be between 2 and 36 inclusive + """ + + if base < 2 or base > 36: + raise ValueError("'base' must be between 2 and 36 inclusive") + + if num < 0: + return False + + n = int_to_base(num, base) + d = sum_of_digits(num, base) + return int(n, base) % int(d, base) == 0 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/maths/hexagonal_number.py b/maths/special_numbers/hexagonal_number.py similarity index 100% rename from maths/hexagonal_number.py rename to maths/special_numbers/hexagonal_number.py diff --git a/maths/krishnamurthy_number.py b/maths/special_numbers/krishnamurthy_number.py similarity index 100% rename from maths/krishnamurthy_number.py rename to maths/special_numbers/krishnamurthy_number.py diff --git a/maths/perfect_number.py b/maths/special_numbers/perfect_number.py similarity index 100% rename from maths/perfect_number.py rename to maths/special_numbers/perfect_number.py diff --git a/maths/polygonal_numbers.py b/maths/special_numbers/polygonal_numbers.py similarity index 100% rename from maths/polygonal_numbers.py rename to maths/special_numbers/polygonal_numbers.py diff --git a/maths/pronic_number.py b/maths/special_numbers/pronic_number.py similarity index 100% rename from maths/pronic_number.py rename to maths/special_numbers/pronic_number.py diff --git a/maths/proth_number.py b/maths/special_numbers/proth_number.py similarity index 100% rename from maths/proth_number.py rename to maths/special_numbers/proth_number.py diff --git a/maths/ugly_numbers.py b/maths/special_numbers/ugly_numbers.py similarity index 96% rename from maths/ugly_numbers.py rename to maths/special_numbers/ugly_numbers.py index 81bd928c6..c6ceb7846 100644 --- a/maths/ugly_numbers.py +++ b/maths/special_numbers/ugly_numbers.py @@ -1,54 +1,54 @@ -""" -Ugly numbers are numbers whose only 
prime factors are 2, 3 or 5. The sequence -1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, … shows the first 11 ugly numbers. By convention, -1 is included. -Given an integer n, we have to find the nth ugly number. - -For more details, refer this article -https://www.geeksforgeeks.org/ugly-numbers/ -""" - - -def ugly_numbers(n: int) -> int: - """ - Returns the nth ugly number. - >>> ugly_numbers(100) - 1536 - >>> ugly_numbers(0) - 1 - >>> ugly_numbers(20) - 36 - >>> ugly_numbers(-5) - 1 - >>> ugly_numbers(-5.5) - Traceback (most recent call last): - ... - TypeError: 'float' object cannot be interpreted as an integer - """ - ugly_nums = [1] - - i2, i3, i5 = 0, 0, 0 - next_2 = ugly_nums[i2] * 2 - next_3 = ugly_nums[i3] * 3 - next_5 = ugly_nums[i5] * 5 - - for _ in range(1, n): - next_num = min(next_2, next_3, next_5) - ugly_nums.append(next_num) - if next_num == next_2: - i2 += 1 - next_2 = ugly_nums[i2] * 2 - if next_num == next_3: - i3 += 1 - next_3 = ugly_nums[i3] * 3 - if next_num == next_5: - i5 += 1 - next_5 = ugly_nums[i5] * 5 - return ugly_nums[-1] - - -if __name__ == "__main__": - from doctest import testmod - - testmod(verbose=True) - print(f"{ugly_numbers(200) = }") +""" +Ugly numbers are numbers whose only prime factors are 2, 3 or 5. The sequence +1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, … shows the first 11 ugly numbers. By convention, +1 is included. +Given an integer n, we have to find the nth ugly number. + +For more details, refer this article +https://www.geeksforgeeks.org/ugly-numbers/ +""" + + +def ugly_numbers(n: int) -> int: + """ + Returns the nth ugly number. + >>> ugly_numbers(100) + 1536 + >>> ugly_numbers(0) + 1 + >>> ugly_numbers(20) + 36 + >>> ugly_numbers(-5) + 1 + >>> ugly_numbers(-5.5) + Traceback (most recent call last): + ... 
+ TypeError: 'float' object cannot be interpreted as an integer + """ + ugly_nums = [1] + + i2, i3, i5 = 0, 0, 0 + next_2 = ugly_nums[i2] * 2 + next_3 = ugly_nums[i3] * 3 + next_5 = ugly_nums[i5] * 5 + + for _ in range(1, n): + next_num = min(next_2, next_3, next_5) + ugly_nums.append(next_num) + if next_num == next_2: + i2 += 1 + next_2 = ugly_nums[i2] * 2 + if next_num == next_3: + i3 += 1 + next_3 = ugly_nums[i3] * 3 + if next_num == next_5: + i5 += 1 + next_5 = ugly_nums[i5] * 5 + return ugly_nums[-1] + + +if __name__ == "__main__": + from doctest import testmod + + testmod(verbose=True) + print(f"{ugly_numbers(200) = }") diff --git a/maths/weird_number.py b/maths/special_numbers/weird_number.py similarity index 100% rename from maths/weird_number.py rename to maths/special_numbers/weird_number.py From ce0ede6476fb14ba18ef03246b169a7e5615bdec Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 20 Oct 2023 03:08:23 -0400 Subject: [PATCH 183/306] Fix typo in DPLL file name (#10723) * Fix DPLL file name * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 46 +++++++++++-------- ...d.py => davis_putnam_logemann_loveland.py} | 0 2 files changed, 28 insertions(+), 18 deletions(-) rename other/{davisb_putnamb_logemannb_loveland.py => davis_putnam_logemann_loveland.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 1320c70ef..5b7ca856e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -65,7 +65,9 @@ ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) + * [Imply Gate](boolean_algebra/imply_gate.py) * [Nand Gate](boolean_algebra/nand_gate.py) + * [Nimply Gate](boolean_algebra/nimply_gate.py) * [Nor Gate](boolean_algebra/nor_gate.py) * [Not Gate](boolean_algebra/not_gate.py) * [Or Gate](boolean_algebra/or_gate.py) @@ -178,7 +180,9 @@ ## Data Structures * Arrays * [Equilibrium Index In Array](data_structures/arrays/equilibrium_index_in_array.py) + * [Find Triplets With 0 
Sum](data_structures/arrays/find_triplets_with_0_sum.py) * [Median Two Array](data_structures/arrays/median_two_array.py) + * [Pairs With Given Sum](data_structures/arrays/pairs_with_given_sum.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) @@ -398,6 +402,7 @@ ## Financial * [Equated Monthly Installments](financial/equated_monthly_installments.py) + * [Exponential Moving Average](financial/exponential_moving_average.py) * [Interest](financial/interest.py) * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) @@ -433,8 +438,7 @@ * [Breadth First Search Shortest Path](graphs/breadth_first_search_shortest_path.py) * [Breadth First Search Shortest Path 2](graphs/breadth_first_search_shortest_path_2.py) * [Breadth First Search Zero One Shortest Path](graphs/breadth_first_search_zero_one_shortest_path.py) - * [Check Bipartite Graph Bfs](graphs/check_bipartite_graph_bfs.py) - * [Check Bipartite Graph Dfs](graphs/check_bipartite_graph_dfs.py) + * [Check Bipatrite](graphs/check_bipatrite.py) * [Check Cycle](graphs/check_cycle.py) * [Connected Components](graphs/connected_components.py) * [Deep Clone Graph](graphs/deep_clone_graph.py) @@ -572,8 +576,6 @@ * [Arc Length](maths/arc_length.py) * [Area](maths/area.py) * [Area Under Curve](maths/area_under_curve.py) - * [Armstrong Numbers](maths/armstrong_numbers.py) - * [Automorphic Number](maths/automorphic_number.py) * [Average Absolute Deviation](maths/average_absolute_deviation.py) * [Average Mean](maths/average_mean.py) * [Average Median](maths/average_median.py) @@ -581,7 +583,6 @@ * [Bailey Borwein Plouffe](maths/bailey_borwein_plouffe.py) * [Base Neg2 Conversion](maths/base_neg2_conversion.py) * [Basic Maths](maths/basic_maths.py) - * [Bell Numbers](maths/bell_numbers.py) * [Binary Exp Mod](maths/binary_exp_mod.py) * [Binary 
Exponentiation](maths/binary_exponentiation.py) * [Binary Exponentiation 2](maths/binary_exponentiation_2.py) @@ -589,8 +590,6 @@ * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) * [Bisection](maths/bisection.py) - * [Carmichael Number](maths/carmichael_number.py) - * [Catalan Number](maths/catalan_number.py) * [Ceil](maths/ceil.py) * [Chebyshev Distance](maths/chebyshev_distance.py) * [Check Polygon](maths/check_polygon.py) @@ -623,10 +622,7 @@ * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) - * [Hamming Numbers](maths/hamming_numbers.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) - * [Harshad Numbers](maths/harshad_numbers.py) - * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) * [Interquartile Range](maths/interquartile_range.py) * [Is Int Palindrome](maths/is_int_palindrome.py) @@ -636,7 +632,6 @@ * [Joint Probability Distribution](maths/joint_probability_distribution.py) * [Juggler Sequence](maths/juggler_sequence.py) * [Karatsuba](maths/karatsuba.py) - * [Krishnamurthy Number](maths/krishnamurthy_number.py) * [Kth Lexicographic Permutation](maths/kth_lexicographic_permutation.py) * [Largest Of Very Large Numbers](maths/largest_of_very_large_numbers.py) * [Least Common Multiple](maths/least_common_multiple.py) @@ -661,14 +656,12 @@ * [Numerical Integration](maths/numerical_integration.py) * [Odd Sieve](maths/odd_sieve.py) * [Perfect Cube](maths/perfect_cube.py) - * [Perfect Number](maths/perfect_number.py) * [Perfect Square](maths/perfect_square.py) * [Persistence](maths/persistence.py) * [Pi Generator](maths/pi_generator.py) * [Pi Monte Carlo Estimation](maths/pi_monte_carlo_estimation.py) * [Points Are Collinear 3D](maths/points_are_collinear_3d.py) * [Pollard Rho](maths/pollard_rho.py) - * [Polygonal 
Numbers](maths/polygonal_numbers.py) * [Polynomial Evaluation](maths/polynomial_evaluation.py) * Polynomials * [Single Indeterminate Operations](maths/polynomials/single_indeterminate_operations.py) @@ -679,8 +672,6 @@ * [Prime Sieve Eratosthenes](maths/prime_sieve_eratosthenes.py) * [Primelib](maths/primelib.py) * [Print Multiplication Table](maths/print_multiplication_table.py) - * [Pronic Number](maths/pronic_number.py) - * [Proth Number](maths/proth_number.py) * [Pythagoras](maths/pythagoras.py) * [Qr Decomposition](maths/qr_decomposition.py) * [Quadratic Equations Complex Numbers](maths/quadratic_equations_complex_numbers.py) @@ -706,6 +697,23 @@ * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) + * [Solovay Strassen Primality Test](maths/solovay_strassen_primality_test.py) + * Special Numbers + * [Armstrong Numbers](maths/special_numbers/armstrong_numbers.py) + * [Automorphic Number](maths/special_numbers/automorphic_number.py) + * [Bell Numbers](maths/special_numbers/bell_numbers.py) + * [Carmichael Number](maths/special_numbers/carmichael_number.py) + * [Catalan Number](maths/special_numbers/catalan_number.py) + * [Hamming Numbers](maths/special_numbers/hamming_numbers.py) + * [Harshad Numbers](maths/special_numbers/harshad_numbers.py) + * [Hexagonal Number](maths/special_numbers/hexagonal_number.py) + * [Krishnamurthy Number](maths/special_numbers/krishnamurthy_number.py) + * [Perfect Number](maths/special_numbers/perfect_number.py) + * [Polygonal Numbers](maths/special_numbers/polygonal_numbers.py) + * [Pronic Number](maths/special_numbers/pronic_number.py) + * [Proth Number](maths/special_numbers/proth_number.py) + * [Ugly Numbers](maths/special_numbers/ugly_numbers.py) + * [Weird Number](maths/special_numbers/weird_number.py) * [Square Root](maths/square_root.py) * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) * [Sum Of Digits](maths/sum_of_digits.py) @@ -721,9 +729,7 @@ * [Twin 
Prime](maths/twin_prime.py) * [Two Pointer](maths/two_pointer.py) * [Two Sum](maths/two_sum.py) - * [Ugly Numbers](maths/ugly_numbers.py) * [Volume](maths/volume.py) - * [Weird Number](maths/weird_number.py) * [Zellers Congruence](maths/zellers_congruence.py) ## Matrix @@ -747,6 +753,7 @@ * [Spiral Print](matrix/spiral_print.py) * Tests * [Test Matrix Operation](matrix/tests/test_matrix_operation.py) + * [Validate Sudoku Board](matrix/validate_sudoku_board.py) ## Networking Flow * [Ford Fulkerson](networking_flow/ford_fulkerson.py) @@ -773,7 +780,7 @@ ## Other * [Activity Selection](other/activity_selection.py) * [Alternative List Arrange](other/alternative_list_arrange.py) - * [Davisb Putnamb Logemannb Loveland](other/davisb_putnamb_logemannb_loveland.py) + * [Davis Putnam Logemann Loveland](other/davis_putnam_logemann_loveland.py) * [Dijkstra Bankers Algorithm](other/dijkstra_bankers_algorithm.py) * [Doomsday](other/doomsday.py) * [Fischer Yates Shuffle](other/fischer_yates_shuffle.py) @@ -822,6 +829,7 @@ * [Rms Speed Of Molecule](physics/rms_speed_of_molecule.py) * [Shear Stress](physics/shear_stress.py) * [Speed Of Sound](physics/speed_of_sound.py) + * [Speeds Of Gas Molecules](physics/speeds_of_gas_molecules.py) ## Project Euler * Problem 001 @@ -1212,6 +1220,7 @@ * [Capitalize](strings/capitalize.py) * [Check Anagrams](strings/check_anagrams.py) * [Credit Card Validator](strings/credit_card_validator.py) + * [Damerau Levenshtein Distance](strings/damerau_levenshtein_distance.py) * [Detecting English Programmatically](strings/detecting_english_programmatically.py) * [Dna](strings/dna.py) * [Edit Distance](strings/edit_distance.py) @@ -1246,6 +1255,7 @@ * [String Switch Case](strings/string_switch_case.py) * [Strip](strings/strip.py) * [Text Justification](strings/text_justification.py) + * [Title](strings/title.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) diff --git 
a/other/davisb_putnamb_logemannb_loveland.py b/other/davis_putnam_logemann_loveland.py similarity index 100% rename from other/davisb_putnamb_logemannb_loveland.py rename to other/davis_putnam_logemann_loveland.py From 579937613a6dc7e099b710e3d57767a2fab115ad Mon Sep 17 00:00:00 2001 From: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> Date: Fri, 20 Oct 2023 16:32:30 +0530 Subject: [PATCH 184/306] Added New Tests in Signum (#10724) * Added new tests! * [ADD]: Inproved Tests * fixed * Removed spaces * Changed the file name * Added Changes * changed the code and kept the test cases * changed the code and kept the test cases * missed the line * removed spaces * Update power_using_recursion.py * Added new tests in Signum * Few things added * Removed few stuff and added few changes * Fixed few things * Reverted the function * Update maths/signum.py Co-authored-by: Christian Clauss * Added few things * Update maths/signum.py Co-authored-by: Christian Clauss * Added the type hint back * Update signum.py --------- Co-authored-by: Christian Clauss --- maths/signum.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/maths/signum.py b/maths/signum.py index 148f93176..c89753e76 100644 --- a/maths/signum.py +++ b/maths/signum.py @@ -7,12 +7,29 @@ def signum(num: float) -> int: """ Applies signum function on the number + Custom test cases: >>> signum(-10) -1 >>> signum(10) 1 >>> signum(0) 0 + >>> signum(-20.5) + -1 + >>> signum(20.5) + 1 + >>> signum(-1e-6) + -1 + >>> signum(1e-6) + 1 + >>> signum("Hello") + Traceback (most recent call last): + ... + TypeError: '<' not supported between instances of 'str' and 'int' + >>> signum([]) + Traceback (most recent call last): + ... 
+ TypeError: '<' not supported between instances of 'list' and 'int' """ if num < 0: return -1 @@ -22,10 +39,17 @@ def signum(num: float) -> int: def test_signum() -> None: """ Tests the signum function + >>> test_signum() """ assert signum(5) == 1 assert signum(-5) == -1 assert signum(0) == 0 + assert signum(10.5) == 1 + assert signum(-10.5) == -1 + assert signum(1e-6) == 1 + assert signum(-1e-6) == -1 + assert signum(123456789) == 1 + assert signum(-123456789) == -1 if __name__ == "__main__": From 52a987ea2f299c8215c1107b8dd793919c962f10 Mon Sep 17 00:00:00 2001 From: Ope Oluwaferanmi <111365699+FEROS01@users.noreply.github.com> Date: Fri, 20 Oct 2023 22:28:21 +0100 Subject: [PATCH 185/306] Add docstrings and doctests and fix a bug ciphers/trifid_cipher.py (#10716) * Added docstrings,doctests and fixed a bug * Added docstrings,doctests and fixed a bug * Added docstrings,doctests and fixed a bug * Added docstrings and doctests with a bug fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added docstrings and doctests with a bug fix * Update ciphers/trifid_cipher.py Co-authored-by: Christian Clauss * Update ciphers/trifid_cipher.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Docstrings edit * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update trifid_cipher.py * Update pyproject.toml --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/trifid_cipher.py | 191 +++++++++++++++++++++++++++------------ pyproject.toml | 2 +- 2 files changed, 134 insertions(+), 59 deletions(-) diff --git a/ciphers/trifid_cipher.py b/ciphers/trifid_cipher.py index 8aa2263ca..16b9faf67 100644 --- a/ciphers/trifid_cipher.py +++ b/ciphers/trifid_cipher.py @@ -1,15 +1,35 @@ -# 
https://en.wikipedia.org/wiki/Trifid_cipher +""" +The trifid cipher uses a table to fractionate each plaintext letter into a trigram, +mixes the constituents of the trigrams, and then applies the table in reverse to turn +these mixed trigrams into ciphertext letters. + +https://en.wikipedia.org/wiki/Trifid_cipher +""" + from __future__ import annotations +# fmt: off +TEST_CHARACTER_TO_NUMBER = { + "A": "111", "B": "112", "C": "113", "D": "121", "E": "122", "F": "123", "G": "131", + "H": "132", "I": "133", "J": "211", "K": "212", "L": "213", "M": "221", "N": "222", + "O": "223", "P": "231", "Q": "232", "R": "233", "S": "311", "T": "312", "U": "313", + "V": "321", "W": "322", "X": "323", "Y": "331", "Z": "332", "+": "333", +} +# fmt: off + +TEST_NUMBER_TO_CHARACTER = {val: key for key, val in TEST_CHARACTER_TO_NUMBER.items()} + def __encrypt_part(message_part: str, character_to_number: dict[str, str]) -> str: + """ + Arrange the triagram value of each letter of 'message_part' vertically and join + them horizontally. + + >>> __encrypt_part('ASK', TEST_CHARACTER_TO_NUMBER) + '132111112' + """ one, two, three = "", "", "" - tmp = [] - - for character in message_part: - tmp.append(character_to_number[character]) - - for each in tmp: + for each in (character_to_number[character] for character in message_part): one += each[0] two += each[1] three += each[2] @@ -20,12 +40,16 @@ def __encrypt_part(message_part: str, character_to_number: dict[str, str]) -> st def __decrypt_part( message_part: str, character_to_number: dict[str, str] ) -> tuple[str, str, str]: - tmp, this_part = "", "" + """ + Convert each letter of the input string into their respective trigram values, join + them and split them into three equal groups of strings which are returned. 
+ + >>> __decrypt_part('ABCDE', TEST_CHARACTER_TO_NUMBER) + ('11111', '21131', '21122') + """ + this_part = "".join(character_to_number[character] for character in message_part) result = [] - - for character in message_part: - this_part += character_to_number[character] - + tmp = "" for digit in this_part: tmp += digit if len(tmp) == len(message_part): @@ -38,6 +62,42 @@ def __decrypt_part( def __prepare( message: str, alphabet: str ) -> tuple[str, str, dict[str, str], dict[str, str]]: + """ + A helper function that generates the triagrams and assigns each letter of the + alphabet to its corresponding triagram and stores this in a dictionary + ("character_to_number" and "number_to_character") after confirming if the + alphabet's length is 27. + + >>> test = __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVwxYZ+') + >>> expected = ('IAMABOY','ABCDEFGHIJKLMNOPQRSTUVWXYZ+', + ... TEST_CHARACTER_TO_NUMBER, TEST_NUMBER_TO_CHARACTER) + >>> test == expected + True + + Testing with incomplete alphabet + >>> __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVw') + Traceback (most recent call last): + ... + KeyError: 'Length of alphabet has to be 27.' + + Testing with extra long alphabets + >>> __prepare('I aM a BOy','abCdeFghijkLmnopqrStuVwxyzzwwtyyujjgfd') + Traceback (most recent call last): + ... + KeyError: 'Length of alphabet has to be 27.' + + Testing with punctuations that are not in the given alphabet + >>> __prepare('am i a boy?','abCdeFghijkLmnopqrStuVwxYZ+') + Traceback (most recent call last): + ... + ValueError: Each message character has to be included in alphabet! + + Testing with numbers + >>> __prepare(500,'abCdeFghijkLmnopqrStuVwxYZ+') + Traceback (most recent call last): + ... 
+ AttributeError: 'int' object has no attribute 'replace' + """ # Validate message and alphabet, set to upper and remove spaces alphabet = alphabet.replace(" ", "").upper() message = message.replace(" ", "").upper() @@ -45,45 +105,14 @@ def __prepare( # Check length and characters if len(alphabet) != 27: raise KeyError("Length of alphabet has to be 27.") - for each in message: - if each not in alphabet: - raise ValueError("Each message character has to be included in alphabet!") + if any(char not in alphabet for char in message): + raise ValueError("Each message character has to be included in alphabet!") # Generate dictionares - numbers = ( - "111", - "112", - "113", - "121", - "122", - "123", - "131", - "132", - "133", - "211", - "212", - "213", - "221", - "222", - "223", - "231", - "232", - "233", - "311", - "312", - "313", - "321", - "322", - "323", - "331", - "332", - "333", - ) - character_to_number = {} - number_to_character = {} - for letter, number in zip(alphabet, numbers): - character_to_number[letter] = number - number_to_character[number] = letter + character_to_number = dict(zip(alphabet, TEST_CHARACTER_TO_NUMBER.values())) + number_to_character = { + number: letter for letter, number in character_to_number.items() + } return message, alphabet, character_to_number, number_to_character @@ -91,44 +120,90 @@ def __prepare( def encrypt_message( message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5 ) -> str: + """ + encrypt_message + =============== + + Encrypts a message using the trifid_cipher. Any punctuatuions that + would be used should be added to the alphabet. + + PARAMETERS + ---------- + + * message: The message you want to encrypt. + * alphabet (optional): The characters to be used for the cipher . + * period (optional): The number of characters you want in a group whilst + encrypting. 
+ + >>> encrypt_message('I am a boy') + 'BCDGBQY' + + >>> encrypt_message(' ') + '' + + >>> encrypt_message(' aide toi le c iel ta id era ', + ... 'FELIXMARDSTBCGHJKNOPQUVWYZ+',5) + 'FMJFVOISSUFTFPUFEQQC' + + """ message, alphabet, character_to_number, number_to_character = __prepare( message, alphabet ) - encrypted, encrypted_numeric = "", "" + encrypted_numeric = "" for i in range(0, len(message) + 1, period): encrypted_numeric += __encrypt_part( message[i : i + period], character_to_number ) + encrypted = "" for i in range(0, len(encrypted_numeric), 3): encrypted += number_to_character[encrypted_numeric[i : i + 3]] - return encrypted def decrypt_message( message: str, alphabet: str = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period: int = 5 ) -> str: + """ + decrypt_message + =============== + + Decrypts a trifid_cipher encrypted message . + + PARAMETERS + ---------- + + * message: The message you want to decrypt . + * alphabet (optional): The characters used for the cipher. + * period (optional): The number of characters used in grouping when it + was encrypted. + + >>> decrypt_message('BCDGBQY') + 'IAMABOY' + + Decrypting with your own alphabet and period + >>> decrypt_message('FMJFVOISSUFTFPUFEQQC','FELIXMARDSTBCGHJKNOPQUVWYZ+',5) + 'AIDETOILECIELTAIDERA' + """ message, alphabet, character_to_number, number_to_character = __prepare( message, alphabet ) - decrypted_numeric = [] - decrypted = "" - for i in range(0, len(message) + 1, period): + decrypted_numeric = [] + for i in range(0, len(message), period): a, b, c = __decrypt_part(message[i : i + period], character_to_number) for j in range(len(a)): decrypted_numeric.append(a[j] + b[j] + c[j]) - for each in decrypted_numeric: - decrypted += number_to_character[each] - - return decrypted + return "".join(number_to_character[each] for each in decrypted_numeric) if __name__ == "__main__": + import doctest + + doctest.testmod() msg = "DEFEND THE EAST WALL OF THE CASTLE." 
encrypted = encrypt_message(msg, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") decrypted = decrypt_message(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") diff --git a/pyproject.toml b/pyproject.toml index 9c9262d77..790a328b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -135,5 +135,5 @@ omit = [ sort = "Cover" [tool.codespell] -ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,zar" +ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" From 5645084dcd5cf398caefa40641ac99144a40e572 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Fri, 20 Oct 2023 17:29:42 -0400 Subject: [PATCH 186/306] Consolidate loss functions into a single file (#10737) * Consolidate loss functions into single file * updating DIRECTORY.md * Fix typo --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 8 +- machine_learning/loss_functions.py | 252 ++++++++++++++++++ .../loss_functions/binary_cross_entropy.py | 59 ---- .../categorical_cross_entropy.py | 85 ------ machine_learning/loss_functions/hinge_loss.py | 64 ----- machine_learning/loss_functions/huber_loss.py | 52 ---- .../loss_functions/mean_squared_error.py | 51 ---- .../mean_squared_logarithmic_error.py | 55 ---- 8 files changed, 253 insertions(+), 373 deletions(-) create mode 100644 machine_learning/loss_functions.py delete mode 100644 machine_learning/loss_functions/binary_cross_entropy.py delete mode 100644 machine_learning/loss_functions/categorical_cross_entropy.py delete mode 100644 machine_learning/loss_functions/hinge_loss.py delete mode 100644 machine_learning/loss_functions/huber_loss.py delete mode 100644 machine_learning/loss_functions/mean_squared_error.py delete mode 100644 
machine_learning/loss_functions/mean_squared_logarithmic_error.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 5b7ca856e..b92f8f877 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -549,13 +549,7 @@ * Local Weighted Learning * [Local Weighted Learning](machine_learning/local_weighted_learning/local_weighted_learning.py) * [Logistic Regression](machine_learning/logistic_regression.py) - * Loss Functions - * [Binary Cross Entropy](machine_learning/loss_functions/binary_cross_entropy.py) - * [Categorical Cross Entropy](machine_learning/loss_functions/categorical_cross_entropy.py) - * [Hinge Loss](machine_learning/loss_functions/hinge_loss.py) - * [Huber Loss](machine_learning/loss_functions/huber_loss.py) - * [Mean Squared Error](machine_learning/loss_functions/mean_squared_error.py) - * [Mean Squared Logarithmic Error](machine_learning/loss_functions/mean_squared_logarithmic_error.py) + * [Loss Functions](machine_learning/loss_functions.py) * [Mfcc](machine_learning/mfcc.py) * [Multilayer Perceptron Classifier](machine_learning/multilayer_perceptron_classifier.py) * [Polynomial Regression](machine_learning/polynomial_regression.py) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py new file mode 100644 index 000000000..0fa0956ed --- /dev/null +++ b/machine_learning/loss_functions.py @@ -0,0 +1,252 @@ +import numpy as np + + +def binary_cross_entropy( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 +) -> float: + """ + Calculate the mean binary cross-entropy (BCE) loss between true labels and predicted + probabilities. + + BCE loss quantifies dissimilarity between true labels (0 or 1) and predicted + probabilities. It's widely used in binary classification tasks. 
+ + BCE = -Σ(y_true * ln(y_pred) + (1 - y_true) * ln(1 - y_pred)) + + Reference: https://en.wikipedia.org/wiki/Cross_entropy + + Parameters: + - y_true: True binary labels (0 or 1) + - y_pred: Predicted probabilities for class 1 + - epsilon: Small constant to avoid numerical instability + + >>> true_labels = np.array([0, 1, 1, 0, 1]) + >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) + >>> binary_cross_entropy(true_labels, predicted_probs) + 0.2529995012327421 + >>> true_labels = np.array([0, 1, 1, 0, 1]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> binary_cross_entropy(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + y_pred = np.clip(y_pred, epsilon, 1 - epsilon) # Clip predictions to avoid log(0) + bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)) + return np.mean(bce_loss) + + +def categorical_cross_entropy( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 +) -> float: + """ + Calculate categorical cross-entropy (CCE) loss between true class labels and + predicted class probabilities. + + CCE = -Σ(y_true * ln(y_pred)) + + Reference: https://en.wikipedia.org/wiki/Cross_entropy + + Parameters: + - y_true: True class labels (one-hot encoded) + - y_pred: Predicted class probabilities + - epsilon: Small constant to avoid numerical instability + + >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + 0.567395975254385 + >>> true_labels = np.array([[1, 0], [0, 1]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same shape. 
+ >>> true_labels = np.array([[2, 0, 1], [1, 0, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. + >>> true_labels = np.array([[1, 0, 1], [1, 0, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: y_true must be one-hot encoded. + >>> true_labels = np.array([[1, 0, 0], [0, 1, 0]]) + >>> pred_probs = np.array([[0.9, 0.1, 0.1], [0.2, 0.7, 0.1]]) + >>> categorical_cross_entropy(true_labels, pred_probs) + Traceback (most recent call last): + ... + ValueError: Predicted probabilities must sum to approximately 1. + """ + if y_true.shape != y_pred.shape: + raise ValueError("Input arrays must have the same shape.") + + if np.any((y_true != 0) & (y_true != 1)) or np.any(y_true.sum(axis=1) != 1): + raise ValueError("y_true must be one-hot encoded.") + + if not np.all(np.isclose(np.sum(y_pred, axis=1), 1, rtol=epsilon, atol=epsilon)): + raise ValueError("Predicted probabilities must sum to approximately 1.") + + y_pred = np.clip(y_pred, epsilon, 1) # Clip predictions to avoid log(0) + return -np.sum(y_true * np.log(y_pred)) + + +def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the mean hinge loss for between true labels and predicted probabilities + for training support vector machines (SVMs). 
+ + Hinge loss = max(0, 1 - true * pred) + + Reference: https://en.wikipedia.org/wiki/Hinge_loss + + Args: + - y_true: actual values (ground truth) encoded as -1 or 1 + - y_pred: predicted values + + >>> true_labels = np.array([-1, 1, 1, -1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(true_labels, pred) + 1.52 + >>> true_labels = np.array([-1, 1, 1, -1, 1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(true_labels, pred) + Traceback (most recent call last): + ... + ValueError: Length of predicted and actual array must be same. + >>> true_labels = np.array([-1, 1, 10, -1, 1]) + >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) + >>> hinge_loss(true_labels, pred) + Traceback (most recent call last): + ... + ValueError: y_true can have values -1 or 1 only. + """ + if len(y_true) != len(y_pred): + raise ValueError("Length of predicted and actual array must be same.") + + if np.any((y_true != -1) & (y_true != 1)): + raise ValueError("y_true can have values -1 or 1 only.") + + hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred)) + return np.mean(hinge_losses) + + +def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float: + """ + Calculate the mean Huber loss between the given ground truth and predicted values. + + The Huber loss describes the penalty incurred by an estimation procedure, and it + serves as a measure of accuracy for regression models. 
+ + Huber loss = + 0.5 * (y_true - y_pred)^2 if |y_true - y_pred| <= delta + delta * |y_true - y_pred| - 0.5 * delta^2 otherwise + + Reference: https://en.wikipedia.org/wiki/Huber_loss + + Parameters: + - y_true: The true values (ground truth) + - y_pred: The predicted values + + >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102) + True + >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0]) + >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) + >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164) + True + >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0]) + >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) + >>> huber_loss(true_labels, predicted_probs, 1.0) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + huber_mse = 0.5 * (y_true - y_pred) ** 2 + huber_mae = delta * (np.abs(y_true - y_pred) - 0.5 * delta) + return np.where(np.abs(y_true - y_pred) <= delta, huber_mse, huber_mae).mean() + + +def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the mean squared error (MSE) between ground truth and predicted values. + + MSE measures the squared difference between true values and predicted values, and it + serves as a measure of accuracy for regression models. 
+ + MSE = (1/n) * Σ(y_true - y_pred)^2 + + Reference: https://en.wikipedia.org/wiki/Mean_squared_error + + Parameters: + - y_true: The true values (ground truth) + - y_pred: The predicted values + + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> np.isclose(mean_squared_error(true_values, predicted_values), 0.028) + True + >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> mean_squared_error(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + squared_errors = (y_true - y_pred) ** 2 + return np.mean(squared_errors) + + +def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculate the mean squared logarithmic error (MSLE) between ground truth and + predicted values. + + MSLE measures the squared logarithmic difference between true values and predicted + values for regression models. It's particularly useful for dealing with skewed or + large-value data, and it's often used when the relative differences between + predicted and true values are more important than absolute differences. 
+ + MSLE = (1/n) * Σ(log(1 + y_true) - log(1 + y_pred))^2 + + Reference: https://insideaiml.com/blog/MeanSquared-Logarithmic-Error-Loss-1035 + + Parameters: + - y_true: The true values (ground truth) + - y_pred: The predicted values + + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> mean_squared_logarithmic_error(true_values, predicted_values) + 0.0030860877925181344 + >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) + >>> mean_squared_logarithmic_error(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. + """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2 + return np.mean(squared_logarithmic_errors) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/machine_learning/loss_functions/binary_cross_entropy.py b/machine_learning/loss_functions/binary_cross_entropy.py deleted file mode 100644 index 4ebca7f21..000000000 --- a/machine_learning/loss_functions/binary_cross_entropy.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -Binary Cross-Entropy (BCE) Loss Function - -Description: -Quantifies dissimilarity between true labels (0 or 1) and predicted probabilities. -It's widely used in binary classification tasks. - -Formula: -BCE = -Σ(y_true * log(y_pred) + (1 - y_true) * log(1 - y_pred)) - -Source: -[Wikipedia - Cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) -""" - -import numpy as np - - -def binary_cross_entropy( - y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 -) -> float: - """ - Calculate the BCE Loss between true labels and predicted probabilities. - - Parameters: - - y_true: True binary labels (0 or 1). - - y_pred: Predicted probabilities for class 1. 
- - epsilon: Small constant to avoid numerical instability. - - Returns: - - bce_loss: Binary Cross-Entropy Loss. - - Example Usage: - >>> true_labels = np.array([0, 1, 1, 0, 1]) - >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8]) - >>> binary_cross_entropy(true_labels, predicted_probs) - 0.2529995012327421 - >>> true_labels = np.array([0, 1, 1, 0, 1]) - >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) - >>> binary_cross_entropy(true_labels, predicted_probs) - Traceback (most recent call last): - ... - ValueError: Input arrays must have the same length. - """ - if len(y_true) != len(y_pred): - raise ValueError("Input arrays must have the same length.") - # Clip predicted probabilities to avoid log(0) and log(1) - y_pred = np.clip(y_pred, epsilon, 1 - epsilon) - - # Calculate binary cross-entropy loss - bce_loss = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred)) - - # Take the mean over all samples - return np.mean(bce_loss) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/machine_learning/loss_functions/categorical_cross_entropy.py b/machine_learning/loss_functions/categorical_cross_entropy.py deleted file mode 100644 index 68f98902b..000000000 --- a/machine_learning/loss_functions/categorical_cross_entropy.py +++ /dev/null @@ -1,85 +0,0 @@ -""" -Categorical Cross-Entropy Loss - -This function calculates the Categorical Cross-Entropy Loss between true class -labels and predicted class probabilities. - -Formula: -Categorical Cross-Entropy Loss = -Σ(y_true * ln(y_pred)) - -Resources: -- [Wikipedia - Cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) -""" - -import numpy as np - - -def categorical_cross_entropy( - y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 -) -> float: - """ - Calculate Categorical Cross-Entropy Loss between true class labels and - predicted class probabilities. - - Parameters: - - y_true: True class labels (one-hot encoded) as a NumPy array. 
- - y_pred: Predicted class probabilities as a NumPy array. - - epsilon: Small constant to avoid numerical instability. - - Returns: - - ce_loss: Categorical Cross-Entropy Loss as a floating-point number. - - Example: - >>> true_labels = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) - >>> pred_probs = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1], [0.0, 0.1, 0.9]]) - >>> categorical_cross_entropy(true_labels, pred_probs) - 0.567395975254385 - - >>> y_true = np.array([[1, 0], [0, 1]]) - >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) - >>> categorical_cross_entropy(y_true, y_pred) - Traceback (most recent call last): - ... - ValueError: Input arrays must have the same shape. - - >>> y_true = np.array([[2, 0, 1], [1, 0, 0]]) - >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) - >>> categorical_cross_entropy(y_true, y_pred) - Traceback (most recent call last): - ... - ValueError: y_true must be one-hot encoded. - - >>> y_true = np.array([[1, 0, 1], [1, 0, 0]]) - >>> y_pred = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1]]) - >>> categorical_cross_entropy(y_true, y_pred) - Traceback (most recent call last): - ... - ValueError: y_true must be one-hot encoded. - - >>> y_true = np.array([[1, 0, 0], [0, 1, 0]]) - >>> y_pred = np.array([[0.9, 0.1, 0.1], [0.2, 0.7, 0.1]]) - >>> categorical_cross_entropy(y_true, y_pred) - Traceback (most recent call last): - ... - ValueError: Predicted probabilities must sum to approximately 1. 
- """ - if y_true.shape != y_pred.shape: - raise ValueError("Input arrays must have the same shape.") - - if np.any((y_true != 0) & (y_true != 1)) or np.any(y_true.sum(axis=1) != 1): - raise ValueError("y_true must be one-hot encoded.") - - if not np.all(np.isclose(np.sum(y_pred, axis=1), 1, rtol=epsilon, atol=epsilon)): - raise ValueError("Predicted probabilities must sum to approximately 1.") - - # Clip predicted probabilities to avoid log(0) - y_pred = np.clip(y_pred, epsilon, 1) - - # Calculate categorical cross-entropy loss - return -np.sum(y_true * np.log(y_pred)) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/machine_learning/loss_functions/hinge_loss.py b/machine_learning/loss_functions/hinge_loss.py deleted file mode 100644 index 5480a8cd6..000000000 --- a/machine_learning/loss_functions/hinge_loss.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Hinge Loss - -Description: -Compute the Hinge loss used for training SVM (Support Vector Machine). - -Formula: -loss = max(0, 1 - true * pred) - -Reference: https://en.wikipedia.org/wiki/Hinge_loss - -Author: Poojan Smart -Email: smrtpoojan@gmail.com -""" - -import numpy as np - - -def hinge_loss(y_true: np.ndarray, y_pred: np.ndarray) -> float: - """ - Calculate the mean hinge loss for y_true and y_pred for binary classification. - - Args: - y_true: Array of actual values (ground truth) encoded as -1 and 1. - y_pred: Array of predicted values. - - Returns: - The hinge loss between y_true and y_pred. - - Examples: - >>> y_true = np.array([-1, 1, 1, -1, 1]) - >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) - >>> hinge_loss(y_true, pred) - 1.52 - >>> y_true = np.array([-1, 1, 1, -1, 1, 1]) - >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) - >>> hinge_loss(y_true, pred) - Traceback (most recent call last): - ... - ValueError: Length of predicted and actual array must be same. 
- >>> y_true = np.array([-1, 1, 10, -1, 1]) - >>> pred = np.array([-4, -0.3, 0.7, 5, 10]) - >>> hinge_loss(y_true, pred) - Traceback (most recent call last): - ... - ValueError: y_true can have values -1 or 1 only. - """ - - if len(y_true) != len(y_pred): - raise ValueError("Length of predicted and actual array must be same.") - - # Raise value error when y_true (encoded labels) have any other values - # than -1 and 1 - if np.any((y_true != -1) & (y_true != 1)): - raise ValueError("y_true can have values -1 or 1 only.") - - hinge_losses = np.maximum(0, 1.0 - (y_true * y_pred)) - return np.mean(hinge_losses) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/machine_learning/loss_functions/huber_loss.py b/machine_learning/loss_functions/huber_loss.py deleted file mode 100644 index 202e013f2..000000000 --- a/machine_learning/loss_functions/huber_loss.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Huber Loss Function - -Description: -Huber loss function describes the penalty incurred by an estimation procedure. -It serves as a measure of the model's accuracy in regression tasks. - -Formula: -Huber Loss = if |y_true - y_pred| <= delta then 0.5 * (y_true - y_pred)^2 - else delta * |y_true - y_pred| - 0.5 * delta^2 - -Source: -[Wikipedia - Huber Loss](https://en.wikipedia.org/wiki/Huber_loss) -""" - -import numpy as np - - -def huber_loss(y_true: np.ndarray, y_pred: np.ndarray, delta: float) -> float: - """ - Calculate the mean of Huber Loss. - - Parameters: - - y_true: The true values (ground truth). - - y_pred: The predicted values. - - Returns: - - huber_loss: The mean of Huber Loss between y_true and y_pred. 
- - Example usage: - >>> true_values = np.array([0.9, 10.0, 2.0, 1.0, 5.2]) - >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> np.isclose(huber_loss(true_values, predicted_values, 1.0), 2.102) - True - >>> true_labels = np.array([11.0, 21.0, 3.32, 4.0, 5.0]) - >>> predicted_probs = np.array([8.3, 20.8, 2.9, 11.2, 5.0]) - >>> np.isclose(huber_loss(true_labels, predicted_probs, 1.0), 1.80164) - True - """ - - if len(y_true) != len(y_pred): - raise ValueError("Input arrays must have the same length.") - - huber_mse = 0.5 * (y_true - y_pred) ** 2 - huber_mae = delta * (np.abs(y_true - y_pred) - 0.5 * delta) - return np.where(np.abs(y_true - y_pred) <= delta, huber_mse, huber_mae).mean() - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/machine_learning/loss_functions/mean_squared_error.py b/machine_learning/loss_functions/mean_squared_error.py deleted file mode 100644 index d2b0e1e15..000000000 --- a/machine_learning/loss_functions/mean_squared_error.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -Mean Squared Error (MSE) Loss Function - -Description: -MSE measures the mean squared difference between true values and predicted values. -It serves as a measure of the model's accuracy in regression tasks. - -Formula: -MSE = (1/n) * Σ(y_true - y_pred)^2 - -Source: -[Wikipedia - Mean squared error](https://en.wikipedia.org/wiki/Mean_squared_error) -""" - -import numpy as np - - -def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: - """ - Calculate the Mean Squared Error (MSE) between two arrays. - - Parameters: - - y_true: The true values (ground truth). - - y_pred: The predicted values. - - Returns: - - mse: The Mean Squared Error between y_true and y_pred. 
- - Example usage: - >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) - >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> mean_squared_error(true_values, predicted_values) - 0.028000000000000032 - >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) - >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) - >>> mean_squared_error(true_labels, predicted_probs) - Traceback (most recent call last): - ... - ValueError: Input arrays must have the same length. - """ - if len(y_true) != len(y_pred): - raise ValueError("Input arrays must have the same length.") - - squared_errors = (y_true - y_pred) ** 2 - return np.mean(squared_errors) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() diff --git a/machine_learning/loss_functions/mean_squared_logarithmic_error.py b/machine_learning/loss_functions/mean_squared_logarithmic_error.py deleted file mode 100644 index 935ebff37..000000000 --- a/machine_learning/loss_functions/mean_squared_logarithmic_error.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Mean Squared Logarithmic Error (MSLE) Loss Function - -Description: -MSLE measures the mean squared logarithmic difference between -true values and predicted values, particularly useful when -dealing with regression problems involving skewed or large-value -targets. It is often used when the relative differences between -predicted and true values are more important than absolute -differences. - -Formula: -MSLE = (1/n) * Σ(log(1 + y_true) - log(1 + y_pred))^2 - -Source: -(https://insideaiml.com/blog/MeanSquared-Logarithmic-Error-Loss-1035) -""" - -import numpy as np - - -def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: - """ - Calculate the Mean Squared Logarithmic Error (MSLE) between two arrays. - - Parameters: - - y_true: The true values (ground truth). - - y_pred: The predicted values. - - Returns: - - msle: The Mean Squared Logarithmic Error between y_true and y_pred. 
- - Example usage: - >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) - >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) - >>> mean_squared_logarithmic_error(true_values, predicted_values) - 0.0030860877925181344 - >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) - >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2]) - >>> mean_squared_logarithmic_error(true_labels, predicted_probs) - Traceback (most recent call last): - ... - ValueError: Input arrays must have the same length. - """ - if len(y_true) != len(y_pred): - raise ValueError("Input arrays must have the same length.") - - squared_logarithmic_errors = (np.log1p(y_true) - np.log1p(y_pred)) ** 2 - return np.mean(squared_logarithmic_errors) - - -if __name__ == "__main__": - import doctest - - doctest.testmod() From 47c19d9b2da6a56f47b520e6c5ca6b654a5eff47 Mon Sep 17 00:00:00 2001 From: Jeel Gajera <83470656+JeelGajera@users.noreply.github.com> Date: Sat, 21 Oct 2023 20:21:29 +0530 Subject: [PATCH 187/306] Add: FP Growth Algorithm (#10746) * Add: FP Growth Algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changes names * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Revert "changes names" This reverts commit c0470094d01391294617df6a92734b78b470b127. 
* refactore code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update frequent_pattern_growth.py --------- Co-authored-by: Jeel Gajera Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 + machine_learning/frequent_pattern_growth.py | 349 ++++++++++++++++++++ 2 files changed, 350 insertions(+) create mode 100644 machine_learning/frequent_pattern_growth.py diff --git a/DIRECTORY.md b/DIRECTORY.md index b92f8f877..916d993c5 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -541,6 +541,7 @@ * [Dimensionality Reduction](machine_learning/dimensionality_reduction.py) * Forecasting * [Run](machine_learning/forecasting/run.py) + * [Frequent Pattern Growth Algorithm](machine_learning/frequent_pattern_growth.py) * [Gradient Descent](machine_learning/gradient_descent.py) * [K Means Clust](machine_learning/k_means_clust.py) * [K Nearest Neighbours](machine_learning/k_nearest_neighbours.py) diff --git a/machine_learning/frequent_pattern_growth.py b/machine_learning/frequent_pattern_growth.py new file mode 100644 index 000000000..205d59846 --- /dev/null +++ b/machine_learning/frequent_pattern_growth.py @@ -0,0 +1,349 @@ +""" +The Frequent Pattern Growth algorithm (FP-Growth) is a widely used data mining +technique for discovering frequent itemsets in large transaction databases. + +It overcomes some of the limitations of traditional methods such as Apriori by +efficiently constructing the FP-Tree + +WIKI: https://athena.ecs.csus.edu/~mei/associationcw/FpGrowth.html + +Examples: https://www.javatpoint.com/fp-growth-algorithm-in-data-mining +""" +from __future__ import annotations + +from dataclasses import dataclass, field + + +@dataclass +class TreeNode: + """ + A node in a Frequent Pattern tree. + + Args: + name: The name of this node. + num_occur: The number of occurrences of the node. + parent_node: The parent node. 
+ + Example: + >>> parent = TreeNode("Parent", 1, None) + >>> child = TreeNode("Child", 2, parent) + >>> child.name + 'Child' + >>> child.count + 2 + """ + + name: str + count: int + parent: TreeNode | None = None + children: dict[str, TreeNode] = field(default_factory=dict) + node_link: TreeNode | None = None + + def __repr__(self) -> str: + return f"TreeNode({self.name!r}, {self.count!r}, {self.parent!r})" + + def inc(self, num_occur: int) -> None: + self.count += num_occur + + def disp(self, ind: int = 1) -> None: + print(f"{' ' * ind} {self.name} {self.count}") + for child in self.children.values(): + child.disp(ind + 1) + + +def create_tree(data_set: list, min_sup: int = 1) -> tuple[TreeNode, dict]: + """ + Create Frequent Pattern tree + + Args: + data_set: A list of transactions, where each transaction is a list of items. + min_sup: The minimum support threshold. + Items with support less than this will be pruned. Default is 1. + + Returns: + The root of the FP-Tree. + header_table: The header table dictionary with item information. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... 
] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> len(header_table) + 4 + >>> header_table["A"] + [[4, None], TreeNode('A', 4, TreeNode('Null Set', 1, None))] + >>> header_table["E"][1] # doctest: +NORMALIZE_WHITESPACE + TreeNode('E', 1, TreeNode('B', 3, TreeNode('A', 4, TreeNode('Null Set', 1, None)))) + >>> sorted(header_table) + ['A', 'B', 'C', 'E'] + >>> fp_tree.name + 'Null Set' + >>> sorted(fp_tree.children) + ['A', 'B'] + >>> fp_tree.children['A'].name + 'A' + >>> sorted(fp_tree.children['A'].children) + ['B', 'C'] + """ + header_table: dict = {} + for trans in data_set: + for item in trans: + header_table[item] = header_table.get(item, [0, None]) + header_table[item][0] += 1 + + for k in list(header_table): + if header_table[k][0] < min_sup: + del header_table[k] + + if not (freq_item_set := set(header_table)): + return TreeNode("Null Set", 1, None), {} + + for k in header_table: + header_table[k] = [header_table[k], None] + + fp_tree = TreeNode("Null Set", 1, None) # Parent is None for the root node + for tran_set in data_set: + local_d = { + item: header_table[item][0] for item in tran_set if item in freq_item_set + } + if local_d: + sorted_items = sorted( + local_d.items(), key=lambda item_info: item_info[1], reverse=True + ) + ordered_items = [item[0] for item in sorted_items] + update_tree(ordered_items, fp_tree, header_table, 1) + + return fp_tree, header_table + + +def update_tree(items: list, in_tree: TreeNode, header_table: dict, count: int) -> None: + """ + Update the FP-Tree with a transaction. + + Args: + items: List of items in the transaction. + in_tree: The current node in the FP-Tree. + header_table: The header table dictionary with item information. + count: The count of the transaction. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... 
] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> transaction = ['A', 'B', 'E'] + >>> update_tree(transaction, fp_tree, header_table, 1) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> fp_tree.children['A'].children['B'].children['E'].children + {} + >>> fp_tree.children['A'].children['B'].children['E'].count + 2 + >>> header_table['E'][1].name + 'E' + """ + if items[0] in in_tree.children: + in_tree.children[items[0]].inc(count) + else: + in_tree.children[items[0]] = TreeNode(items[0], count, in_tree) + if header_table[items[0]][1] is None: + header_table[items[0]][1] = in_tree.children[items[0]] + else: + update_header(header_table[items[0]][1], in_tree.children[items[0]]) + if len(items) > 1: + update_tree(items[1:], in_tree.children[items[0]], header_table, count) + + +def update_header(node_to_test: TreeNode, target_node: TreeNode) -> TreeNode: + """ + Update the header table with a node link. + + Args: + node_to_test: The node to be updated in the header table. + target_node: The node to link to. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... 
] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> node1 = TreeNode("A", 3, None) + >>> node2 = TreeNode("B", 4, None) + >>> node1 + TreeNode('A', 3, None) + >>> node1 = update_header(node1, node2) + >>> node1 + TreeNode('A', 3, None) + >>> node1.node_link + TreeNode('B', 4, None) + >>> node2.node_link is None + True + """ + while node_to_test.node_link is not None: + node_to_test = node_to_test.node_link + if node_to_test.node_link is None: + node_to_test.node_link = target_node + # Return the updated node + return node_to_test + + +def ascend_tree(leaf_node: TreeNode, prefix_path: list[str]) -> None: + """ + Ascend the FP-Tree from a leaf node to its root, adding item names to the prefix + path. + + Args: + leaf_node: The leaf node to start ascending from. + prefix_path: A list to store the item as they are ascended. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... ] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + + >>> path = [] + >>> ascend_tree(fp_tree.children['A'], path) + >>> path # ascending from a leaf node 'A' + ['A'] + """ + if leaf_node.parent is not None: + prefix_path.append(leaf_node.name) + ascend_tree(leaf_node.parent, prefix_path) + + +def find_prefix_path(base_pat: frozenset, tree_node: TreeNode | None) -> dict: + """ + Find the conditional pattern base for a given base pattern. + + Args: + base_pat: The base pattern for which to find the conditional pattern base. + tree_node: The node in the FP-Tree. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... 
] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> len(header_table) + 4 + >>> base_pattern = frozenset(['A']) + >>> sorted(find_prefix_path(base_pattern, fp_tree.children['A'])) + [] + """ + cond_pats: dict = {} + while tree_node is not None: + prefix_path: list = [] + ascend_tree(tree_node, prefix_path) + if len(prefix_path) > 1: + cond_pats[frozenset(prefix_path[1:])] = tree_node.count + tree_node = tree_node.node_link + return cond_pats + + +def mine_tree( + in_tree: TreeNode, + header_table: dict, + min_sup: int, + pre_fix: set, + freq_item_list: list, +) -> None: + """ + Mine the FP-Tree recursively to discover frequent itemsets. + + Args: + in_tree: The FP-Tree to mine. + header_table: The header table dictionary with item information. + min_sup: The minimum support threshold. + pre_fix: A set of items as a prefix for the itemsets being mined. + freq_item_list: A list to store the frequent itemsets. + + Example: + >>> data_set = [ + ... ['A', 'B', 'C'], + ... ['A', 'C'], + ... ['A', 'B', 'E'], + ... ['A', 'B', 'C', 'E'], + ... ['B', 'E'] + ... 
] + >>> min_sup = 2 + >>> fp_tree, header_table = create_tree(data_set, min_sup) + >>> fp_tree + TreeNode('Null Set', 1, None) + >>> frequent_itemsets = [] + >>> mine_tree(fp_tree, header_table, min_sup, set([]), frequent_itemsets) + >>> expe_itm = [{'C'}, {'C', 'A'}, {'E'}, {'A', 'E'}, {'E', 'B'}, {'A'}, {'B'}] + >>> all(expected in frequent_itemsets for expected in expe_itm) + True + """ + sorted_items = sorted(header_table.items(), key=lambda item_info: item_info[1][0]) + big_l = [item[0] for item in sorted_items] + for base_pat in big_l: + new_freq_set = pre_fix.copy() + new_freq_set.add(base_pat) + freq_item_list.append(new_freq_set) + cond_patt_bases = find_prefix_path(base_pat, header_table[base_pat][1]) + my_cond_tree, my_head = create_tree(list(cond_patt_bases), min_sup) + if my_head is not None: + # Pass header_table[base_pat][1] as node_to_test to update_header + header_table[base_pat][1] = update_header( + header_table[base_pat][1], my_cond_tree + ) + mine_tree(my_cond_tree, my_head, min_sup, new_freq_set, freq_item_list) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + data_set: list[frozenset] = [ + frozenset(["bread", "milk", "cheese"]), + frozenset(["bread", "milk"]), + frozenset(["bread", "diapers"]), + frozenset(["bread", "milk", "diapers"]), + frozenset(["milk", "diapers"]), + frozenset(["milk", "cheese"]), + frozenset(["diapers", "cheese"]), + frozenset(["bread", "milk", "cheese", "diapers"]), + ] + print(f"{len(data_set) = }") + fp_tree, header_table = create_tree(data_set, min_sup=3) + print(f"{fp_tree = }") + print(f"{len(header_table) = }") + freq_items: list = [] + mine_tree(fp_tree, header_table, 3, set(), freq_items) + print(f"{freq_items = }") From 06edc0eea0220f29491f75351cde1af9716aca8d Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 21 Oct 2023 13:27:36 -0400 Subject: [PATCH 188/306] Consolidate binary exponentiation files (#10742) * Consolidate binary exponentiation files * updating DIRECTORY.md * 
Fix typos in doctests * Add suggestions from code review * Fix timeit benchmarks --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 - maths/binary_exp_mod.py | 28 ---- maths/binary_exponentiation.py | 216 ++++++++++++++++++++++++++----- maths/binary_exponentiation_2.py | 61 --------- 4 files changed, 182 insertions(+), 125 deletions(-) delete mode 100644 maths/binary_exp_mod.py delete mode 100644 maths/binary_exponentiation_2.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 916d993c5..9e0166ad8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -578,9 +578,7 @@ * [Bailey Borwein Plouffe](maths/bailey_borwein_plouffe.py) * [Base Neg2 Conversion](maths/base_neg2_conversion.py) * [Basic Maths](maths/basic_maths.py) - * [Binary Exp Mod](maths/binary_exp_mod.py) * [Binary Exponentiation](maths/binary_exponentiation.py) - * [Binary Exponentiation 2](maths/binary_exponentiation_2.py) * [Binary Multiplication](maths/binary_multiplication.py) * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) diff --git a/maths/binary_exp_mod.py b/maths/binary_exp_mod.py deleted file mode 100644 index 8893182a3..000000000 --- a/maths/binary_exp_mod.py +++ /dev/null @@ -1,28 +0,0 @@ -def bin_exp_mod(a: int, n: int, b: int) -> int: - """ - >>> bin_exp_mod(3, 4, 5) - 1 - >>> bin_exp_mod(7, 13, 10) - 7 - """ - # mod b - assert b != 0, "This cannot accept modulo that is == 0" - if n == 0: - return 1 - - if n % 2 == 1: - return (bin_exp_mod(a, n - 1, b) * a) % b - - r = bin_exp_mod(a, n // 2, b) - return (r * r) % b - - -if __name__ == "__main__": - try: - BASE = int(input("Enter Base : ").strip()) - POWER = int(input("Enter Power : ").strip()) - MODULO = int(input("Enter Modulo : ").strip()) - except ValueError: - print("Invalid literal for integer") - - print(bin_exp_mod(BASE, POWER, MODULO)) diff --git a/maths/binary_exponentiation.py b/maths/binary_exponentiation.py index 
f613767f5..51ce86d26 100644 --- a/maths/binary_exponentiation.py +++ b/maths/binary_exponentiation.py @@ -1,48 +1,196 @@ -"""Binary Exponentiation.""" +""" +Binary Exponentiation -# Author : Junth Basnet -# Time Complexity : O(logn) +This is a method to find a^b in O(log b) time complexity and is one of the most commonly +used methods of exponentiation. The method is also useful for modular exponentiation, +when the solution to (a^b) % c is required. + +To calculate a^b: +- If b is even, then a^b = (a * a)^(b / 2) +- If b is odd, then a^b = a * a^(b - 1) +Repeat until b = 1 or b = 0 + +For modular exponentiation, we use the fact that (a * b) % c = ((a % c) * (b % c)) % c +""" -def binary_exponentiation(a: int, n: int) -> int: +def binary_exp_recursive(base: float, exponent: int) -> float: """ - Compute a number raised by some quantity - >>> binary_exponentiation(-1, 3) - -1 - >>> binary_exponentiation(-1, 4) - 1 - >>> binary_exponentiation(2, 2) - 4 - >>> binary_exponentiation(3, 5) + Computes a^b recursively, where a is the base and b is the exponent + + >>> binary_exp_recursive(3, 5) 243 - >>> binary_exponentiation(10, 3) - 1000 - >>> binary_exponentiation(5e3, 1) - 5000.0 - >>> binary_exponentiation(-5e3, 1) - -5000.0 + >>> binary_exp_recursive(11, 13) + 34522712143931 + >>> binary_exp_recursive(-1, 3) + -1 + >>> binary_exp_recursive(0, 5) + 0 + >>> binary_exp_recursive(3, 1) + 3 + >>> binary_exp_recursive(3, 0) + 1 + >>> binary_exp_recursive(1.5, 4) + 5.0625 + >>> binary_exp_recursive(3, -1) + Traceback (most recent call last): + ... 
+ ValueError: Exponent must be a non-negative integer """ - if n == 0: + if exponent < 0: + raise ValueError("Exponent must be a non-negative integer") + + if exponent == 0: return 1 - elif n % 2 == 1: - return binary_exponentiation(a, n - 1) * a + if exponent % 2 == 1: + return binary_exp_recursive(base, exponent - 1) * base - else: - b = binary_exponentiation(a, n // 2) - return b * b + b = binary_exp_recursive(base, exponent // 2) + return b * b + + +def binary_exp_iterative(base: float, exponent: int) -> float: + """ + Computes a^b iteratively, where a is the base and b is the exponent + + >>> binary_exp_iterative(3, 5) + 243 + >>> binary_exp_iterative(11, 13) + 34522712143931 + >>> binary_exp_iterative(-1, 3) + -1 + >>> binary_exp_iterative(0, 5) + 0 + >>> binary_exp_iterative(3, 1) + 3 + >>> binary_exp_iterative(3, 0) + 1 + >>> binary_exp_iterative(1.5, 4) + 5.0625 + >>> binary_exp_iterative(3, -1) + Traceback (most recent call last): + ... + ValueError: Exponent must be a non-negative integer + """ + if exponent < 0: + raise ValueError("Exponent must be a non-negative integer") + + res: int | float = 1 + while exponent > 0: + if exponent & 1: + res *= base + + base *= base + exponent >>= 1 + + return res + + +def binary_exp_mod_recursive(base: float, exponent: int, modulus: int) -> float: + """ + Computes a^b % c recursively, where a is the base, b is the exponent, and c is the + modulus + + >>> binary_exp_mod_recursive(3, 4, 5) + 1 + >>> binary_exp_mod_recursive(11, 13, 7) + 4 + >>> binary_exp_mod_recursive(1.5, 4, 3) + 2.0625 + >>> binary_exp_mod_recursive(7, -1, 10) + Traceback (most recent call last): + ... + ValueError: Exponent must be a non-negative integer + >>> binary_exp_mod_recursive(7, 13, 0) + Traceback (most recent call last): + ... 
+ ValueError: Modulus must be a positive integer + """ + if exponent < 0: + raise ValueError("Exponent must be a non-negative integer") + if modulus <= 0: + raise ValueError("Modulus must be a positive integer") + + if exponent == 0: + return 1 + + if exponent % 2 == 1: + return (binary_exp_mod_recursive(base, exponent - 1, modulus) * base) % modulus + + r = binary_exp_mod_recursive(base, exponent // 2, modulus) + return (r * r) % modulus + + +def binary_exp_mod_iterative(base: float, exponent: int, modulus: int) -> float: + """ + Computes a^b % c iteratively, where a is the base, b is the exponent, and c is the + modulus + + >>> binary_exp_mod_iterative(3, 4, 5) + 1 + >>> binary_exp_mod_iterative(11, 13, 7) + 4 + >>> binary_exp_mod_iterative(1.5, 4, 3) + 2.0625 + >>> binary_exp_mod_iterative(7, -1, 10) + Traceback (most recent call last): + ... + ValueError: Exponent must be a non-negative integer + >>> binary_exp_mod_iterative(7, 13, 0) + Traceback (most recent call last): + ... + ValueError: Modulus must be a positive integer + """ + if exponent < 0: + raise ValueError("Exponent must be a non-negative integer") + if modulus <= 0: + raise ValueError("Modulus must be a positive integer") + + res: int | float = 1 + while exponent > 0: + if exponent & 1: + res = ((res % modulus) * (base % modulus)) % modulus + + base *= base + exponent >>= 1 + + return res if __name__ == "__main__": - import doctest + from timeit import timeit - doctest.testmod() + a = 1269380576 + b = 374 + c = 34 - try: - BASE = int(float(input("Enter Base : ").strip())) - POWER = int(input("Enter Power : ").strip()) - except ValueError: - print("Invalid literal for integer") - - RESULT = binary_exponentiation(BASE, POWER) - print(f"{BASE}^({POWER}) : {RESULT}") + runs = 100_000 + print( + timeit( + f"binary_exp_recursive({a}, {b})", + setup="from __main__ import binary_exp_recursive", + number=runs, + ) + ) + print( + timeit( + f"binary_exp_iterative({a}, {b})", + setup="from __main__ import 
binary_exp_iterative", + number=runs, + ) + ) + print( + timeit( + f"binary_exp_mod_recursive({a}, {b}, {c})", + setup="from __main__ import binary_exp_mod_recursive", + number=runs, + ) + ) + print( + timeit( + f"binary_exp_mod_iterative({a}, {b}, {c})", + setup="from __main__ import binary_exp_mod_iterative", + number=runs, + ) + ) diff --git a/maths/binary_exponentiation_2.py b/maths/binary_exponentiation_2.py deleted file mode 100644 index edb6b66b2..000000000 --- a/maths/binary_exponentiation_2.py +++ /dev/null @@ -1,61 +0,0 @@ -""" -Binary Exponentiation -This is a method to find a^b in O(log b) time complexity -This is one of the most commonly used methods of exponentiation -It's also useful when the solution to (a^b) % c is required because a, b, c may be -over the computer's calculation limits - -Let's say you need to calculate a ^ b -- RULE 1 : a ^ b = (a*a) ^ (b/2) ---- example : 4 ^ 4 = (4*4) ^ (4/2) = 16 ^ 2 -- RULE 2 : IF b is odd, then a ^ b = a * (a ^ (b - 1)), where b - 1 is even -Once b is even, repeat the process until b = 1 or b = 0, because a^1 = a and a^0 = 1 - -For modular exponentiation, we use the fact that (a*b) % c = ((a%c) * (b%c)) % c -Now apply RULE 1 or 2 as required - -@author chinmoy159 -""" - - -def b_expo(a: int, b: int) -> int: - """ - >>> b_expo(2, 10) - 1024 - >>> b_expo(9, 0) - 1 - >>> b_expo(0, 12) - 0 - >>> b_expo(4, 12) - 16777216 - """ - res = 1 - while b > 0: - if b & 1: - res *= a - - a *= a - b >>= 1 - - return res - - -def b_expo_mod(a: int, b: int, c: int) -> int: - """ - >>> b_expo_mod(2, 10, 1000000007) - 1024 - >>> b_expo_mod(11, 13, 19) - 11 - >>> b_expo_mod(0, 19, 20) - 0 - >>> b_expo_mod(15, 5, 4) - 3 - """ - res = 1 - while b > 0: - if b & 1: - res = ((res % c) * (a % c)) % c - - a *= a - b >>= 1 - - return res From b814cf3781a97c273a779823b8b8ab388417b7b4 Mon Sep 17 00:00:00 2001 From: Kiarash Hajian <133909368+kiarash8112@users.noreply.github.com> Date: Sat, 21 Oct 2023 14:53:34 -0400 Subject: [PATCH 189/306] 
add exponential search algorithm (#10732) * add exponential_search algorithm * replace binary_search with binary_search_recursion * convert left type to int to be useable in binary_search_recursion * add docs and tests for exponential_search algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * move exponential_search to binary_search.py to pass github auto build tests delete exponential_search.py file * Update searches/binary_search.py Co-authored-by: Christian Clauss * remove additional space searches/binary_search.py Co-authored-by: Christian Clauss * return single data type in exponential_search searches/binary_search.py Co-authored-by: Christian Clauss * add doctest mod searches/binary_search.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * use // instread of int() convert searches/binary_search.py Co-authored-by: Christian Clauss * change test according to new code searches/binary_search.py Co-authored-by: Christian Clauss * fix binary_search_recursion multiple type return error * add a timeit benchmark for exponential_search * sort input of binary search to be equal in performance test with exponential_search * raise value error instead of sorting input in binary and exonential search to fix bugs * Update binary_search.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: user --- searches/binary_search.py | 149 +++++++++++++++++++++++++------------- 1 file changed, 100 insertions(+), 49 deletions(-) diff --git a/searches/binary_search.py b/searches/binary_search.py index 05dadd4fe..586be39c9 100644 --- a/searches/binary_search.py +++ b/searches/binary_search.py @@ -1,9 +1,9 @@ #!/usr/bin/env python3 """ -This is pure Python implementation of binary search algorithms +Pure Python implementations of binary search 
algorithms -For doctests run following command: +For doctests run the following command: python3 -m doctest -v binary_search.py For manual testing run: @@ -34,16 +34,12 @@ def bisect_left( Examples: >>> bisect_left([0, 5, 7, 10, 15], 0) 0 - >>> bisect_left([0, 5, 7, 10, 15], 6) 2 - >>> bisect_left([0, 5, 7, 10, 15], 20) 5 - >>> bisect_left([0, 5, 7, 10, 15], 15, 1, 3) 3 - >>> bisect_left([0, 5, 7, 10, 15], 6, 2) 2 """ @@ -79,16 +75,12 @@ def bisect_right( Examples: >>> bisect_right([0, 5, 7, 10, 15], 0) 1 - >>> bisect_right([0, 5, 7, 10, 15], 15) 5 - >>> bisect_right([0, 5, 7, 10, 15], 6) 2 - >>> bisect_right([0, 5, 7, 10, 15], 15, 1, 3) 3 - >>> bisect_right([0, 5, 7, 10, 15], 6, 2) 2 """ @@ -124,7 +116,6 @@ def insort_left( >>> insort_left(sorted_collection, 6) >>> sorted_collection [0, 5, 6, 7, 10, 15] - >>> sorted_collection = [(0, 0), (5, 5), (7, 7), (10, 10), (15, 15)] >>> item = (5, 5) >>> insort_left(sorted_collection, item) @@ -134,12 +125,10 @@ def insort_left( True >>> item is sorted_collection[2] False - >>> sorted_collection = [0, 5, 7, 10, 15] >>> insort_left(sorted_collection, 20) >>> sorted_collection [0, 5, 7, 10, 15, 20] - >>> sorted_collection = [0, 5, 7, 10, 15] >>> insort_left(sorted_collection, 15, 1, 3) >>> sorted_collection @@ -167,7 +156,6 @@ def insort_right( >>> insort_right(sorted_collection, 6) >>> sorted_collection [0, 5, 6, 7, 10, 15] - >>> sorted_collection = [(0, 0), (5, 5), (7, 7), (10, 10), (15, 15)] >>> item = (5, 5) >>> insort_right(sorted_collection, item) @@ -177,12 +165,10 @@ def insort_right( False >>> item is sorted_collection[2] True - >>> sorted_collection = [0, 5, 7, 10, 15] >>> insort_right(sorted_collection, 20) >>> sorted_collection [0, 5, 7, 10, 15, 20] - >>> sorted_collection = [0, 5, 7, 10, 15] >>> insort_right(sorted_collection, 15, 1, 3) >>> sorted_collection @@ -191,29 +177,28 @@ def insort_right( sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item) -def binary_search(sorted_collection: 
list[int], item: int) -> int | None: - """Pure implementation of binary search algorithm in Python +def binary_search(sorted_collection: list[int], item: int) -> int: + """Pure implementation of a binary search algorithm in Python - Be careful collection must be ascending sorted, otherwise result will be + Be careful collection must be ascending sorted otherwise, the result will be unpredictable :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search - :return: index of found item or None if item is not found + :return: index of the found item or -1 if the item is not found Examples: >>> binary_search([0, 5, 7, 10, 15], 0) 0 - >>> binary_search([0, 5, 7, 10, 15], 15) 4 - >>> binary_search([0, 5, 7, 10, 15], 5) 1 - >>> binary_search([0, 5, 7, 10, 15], 6) - + -1 """ + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") left = 0 right = len(sorted_collection) - 1 @@ -226,66 +211,66 @@ def binary_search(sorted_collection: list[int], item: int) -> int | None: right = midpoint - 1 else: left = midpoint + 1 - return None + return -1 -def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None: - """Pure implementation of binary search algorithm in Python using stdlib +def binary_search_std_lib(sorted_collection: list[int], item: int) -> int: + """Pure implementation of a binary search algorithm in Python using stdlib - Be careful collection must be ascending sorted, otherwise result will be + Be careful collection must be ascending sorted otherwise, the result will be unpredictable :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search - :return: index of found item or None if item is not found + :return: index of the found item or -1 if the item is not found Examples: >>> binary_search_std_lib([0, 5, 7, 10, 15], 0) 0 - >>> binary_search_std_lib([0, 5, 7, 
10, 15], 15) 4 - >>> binary_search_std_lib([0, 5, 7, 10, 15], 5) 1 - >>> binary_search_std_lib([0, 5, 7, 10, 15], 6) - + -1 """ + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") index = bisect.bisect_left(sorted_collection, item) if index != len(sorted_collection) and sorted_collection[index] == item: return index - return None + return -1 def binary_search_by_recursion( - sorted_collection: list[int], item: int, left: int, right: int -) -> int | None: - """Pure implementation of binary search algorithm in Python by recursion + sorted_collection: list[int], item: int, left: int = 0, right: int = -1 +) -> int: + """Pure implementation of a binary search algorithm in Python by recursion - Be careful collection must be ascending sorted, otherwise result will be + Be careful collection must be ascending sorted otherwise, the result will be unpredictable First recursion should be started with left=0 and right=(len(sorted_collection)-1) :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search - :return: index of found item or None if item is not found + :return: index of the found item or -1 if the item is not found Examples: >>> binary_search_by_recursion([0, 5, 7, 10, 15], 0, 0, 4) 0 - >>> binary_search_by_recursion([0, 5, 7, 10, 15], 15, 0, 4) 4 - >>> binary_search_by_recursion([0, 5, 7, 10, 15], 5, 0, 4) 1 - >>> binary_search_by_recursion([0, 5, 7, 10, 15], 6, 0, 4) - + -1 """ + if right < 0: + right = len(sorted_collection) - 1 + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") if right < left: - return None + return -1 midpoint = left + (right - left) // 2 @@ -297,12 +282,78 @@ def binary_search_by_recursion( return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right) +def exponential_search(sorted_collection: list[int], item: 
int) -> int: + """Pure implementation of an exponential search algorithm in Python + Resources used: + https://en.wikipedia.org/wiki/Exponential_search + + Be careful collection must be ascending sorted otherwise, result will be + unpredictable + + :param sorted_collection: some ascending sorted collection with comparable items + :param item: item value to search + :return: index of the found item or -1 if the item is not found + + the order of this algorithm is O(lg I) where I is index position of item if exist + + Examples: + >>> exponential_search([0, 5, 7, 10, 15], 0) + 0 + >>> exponential_search([0, 5, 7, 10, 15], 15) + 4 + >>> exponential_search([0, 5, 7, 10, 15], 5) + 1 + >>> exponential_search([0, 5, 7, 10, 15], 6) + -1 + """ + if list(sorted_collection) != sorted(sorted_collection): + raise ValueError("sorted_collection must be sorted in ascending order") + bound = 1 + while bound < len(sorted_collection) and sorted_collection[bound] < item: + bound *= 2 + left = bound // 2 + right = min(bound, len(sorted_collection) - 1) + last_result = binary_search_by_recursion( + sorted_collection=sorted_collection, item=item, left=left, right=right + ) + if last_result is None: + return -1 + return last_result + + +searches = ( # Fastest to slowest... 
+ binary_search_std_lib, + binary_search, + exponential_search, + binary_search_by_recursion, +) + + if __name__ == "__main__": - user_input = input("Enter numbers separated by comma:\n").strip() + import doctest + import timeit + + doctest.testmod() + for search in searches: + name = f"{search.__name__:>26}" + print(f"{name}: {search([0, 5, 7, 10, 15], 10) = }") # type: ignore[operator] + + print("\nBenchmarks...") + setup = "collection = range(1000)" + for search in searches: + name = search.__name__ + print( + f"{name:>26}:", + timeit.timeit( + f"{name}(collection, 500)", setup=setup, number=5_000, globals=globals() + ), + ) + + user_input = input("\nEnter numbers separated by comma: ").strip() collection = sorted(int(item) for item in user_input.split(",")) - target = int(input("Enter a single number to be found in the list:\n")) - result = binary_search(collection, target) - if result is None: + target = int(input("Enter a single number to be found in the list: ")) + result = binary_search(sorted_collection=collection, item=target) + if result == -1: print(f"{target} was not found in {collection}.") else: - print(f"{target} was found at position {result} in {collection}.") + print(f"{target} was found at position {result} of {collection}.") From 4707fdb0f27bdc1e7442ce5940da335d58885104 Mon Sep 17 00:00:00 2001 From: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> Date: Sun, 22 Oct 2023 03:35:37 +0530 Subject: [PATCH 190/306] Add tests for Perfect_Number (#10745) * Added new tests! 
* [ADD]: Inproved Tests * fixed * Removed spaces * Changed the file name * Added Changes * changed the code and kept the test cases * changed the code and kept the test cases * missed the line * removed spaces * Update power_using_recursion.py * Added new tests in Signum * Few things added * Removed few stuff and added few changes * Fixed few things * Reverted the function * Update maths/signum.py Co-authored-by: Christian Clauss * Added few things * Update maths/signum.py Co-authored-by: Christian Clauss * Added the type hint back * Update signum.py * Added NEW tests for Perfect_Number * Update maths/special_numbers/perfect_number.py Co-authored-by: Christian Clauss * Added the line back * Update maths/special_numbers/perfect_number.py Co-authored-by: Christian Clauss * Fixed a space * Updated * Reverted changes * Added the old code and FIXED few LINES * Fixed few things * Changed Test CASES * Update perfect_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/perfect_number.py | 77 +++++++++++++++++++++++++ maths/special_numbers/perfect_number.py | 29 ++++++++-- 2 files changed, 102 insertions(+), 4 deletions(-) create mode 100644 maths/perfect_number.py diff --git a/maths/perfect_number.py b/maths/perfect_number.py new file mode 100644 index 000000000..df6b6e3d9 --- /dev/null +++ b/maths/perfect_number.py @@ -0,0 +1,77 @@ +""" +== Perfect Number == +In number theory, a perfect number is a positive integer that is equal to the sum of +its positive divisors, excluding the number itself. +For example: 6 ==> divisors[1, 2, 3, 6] + Excluding 6, the sum(divisors) is 1 + 2 + 3 = 6 + So, 6 is a Perfect Number +Other examples of Perfect Numbers: 28, 496, ...
+ +https://en.wikipedia.org/wiki/Perfect_number +""" + + +def perfect(number: int) -> bool: + """ + Check if a number is a perfect number. + + A perfect number is a positive integer that is equal to the sum of its proper + divisors (excluding itself). + + Args: + number: The number to be checked. + + Returns: + True if the number is a perfect number otherwise, False. + Start from 1 because dividing by 0 will raise ZeroDivisionError. + A number at most can be divisible by the half of the number except the number + itself. For example, 6 is at most can be divisible by 3 except by 6 itself. + Examples: + >>> perfect(27) + False + >>> perfect(28) + True + >>> perfect(29) + False + >>> perfect(6) + True + >>> perfect(12) + False + >>> perfect(496) + True + >>> perfect(8128) + True + >>> perfect(0) + False + >>> perfect(-1) + False + >>> perfect(12.34) + Traceback (most recent call last): + ... + ValueError: number must be an integer + >>> perfect("Hello") + Traceback (most recent call last): + ... + ValueError: number must be an integer + """ + if not isinstance(number, int): + raise ValueError("number must be an integer") + if number <= 0: + return False + return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + print("Program to check whether a number is a Perfect number or not...") + try: + number = int(input("Enter a positive integer: ").strip()) + except ValueError: + msg = "number must be an integer" + print(msg) + raise ValueError(msg) + + print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.") diff --git a/maths/special_numbers/perfect_number.py b/maths/special_numbers/perfect_number.py index 148e988fb..160ab2d96 100644 --- a/maths/special_numbers/perfect_number.py +++ b/maths/special_numbers/perfect_number.py @@ -14,16 +14,37 @@ https://en.wikipedia.org/wiki/Perfect_number def perfect(number: int) -> bool: """ + Check if a number is a perfect number.
+ + A perfect number is a positive integer that is equal to the sum of its proper + divisors (excluding itself). + + Args: + number: The number to be checked. + + Returns: + True if the number is a perfect number, False otherwise. + + Examples: >>> perfect(27) False >>> perfect(28) True >>> perfect(29) False - - Start from 1 because dividing by 0 will raise ZeroDivisionError. - A number at most can be divisible by the half of the number except the number - itself. For example, 6 is at most can be divisible by 3 except by 6 itself. + >>> perfect(6) + True + >>> perfect(12) + False + >>> perfect(496) + True + >>> perfect(8128) + True + >>> perfect(0) + >>> perfect(-3) + >>> perfect(12.34) + >>> perfect("day") + >>> perfect(["call"]) """ return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number From d9562033f6b15c17e0b48181c087731751abd7a6 Mon Sep 17 00:00:00 2001 From: Barun Parua <76466796+Baron105@users.noreply.github.com> Date: Sun, 22 Oct 2023 04:03:50 +0530 Subject: [PATCH 191/306] added a function to calculate perceived frequency by observer using Doppler Effect (#10776) * avg and mps speed formulae added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * avg and mps speed formulae added * fixed_spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ws * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added amicable numbers * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed * changed name of file and added code improvements * [pre-commit.ci] auto fixes from 
pre-commit.com hooks for more information, see https://pre-commit.ci * issues fixed due to pi * requested changes added * Created doppler_effect_of_sound.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated doppler_effect_of_sound.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added desc names * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed spacing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * renamed doppler_effect_of_sound.py to doppler_frequency.py * used expection handling rather than print statements * fixed spacing for ruff * Update doppler_frequency.py This is super slick! Well done. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/special_numbers/perfect_number.py | 32 +++++- .../{perceptron.py => perceptron.py.DISABLED} | 0 physics/doppler_frequency.py | 104 ++++++++++++++++++ 3 files changed, 132 insertions(+), 4 deletions(-) rename neural_network/{perceptron.py => perceptron.py.DISABLED} (100%) create mode 100644 physics/doppler_frequency.py diff --git a/maths/special_numbers/perfect_number.py b/maths/special_numbers/perfect_number.py index 160ab2d96..a022dc677 100644 --- a/maths/special_numbers/perfect_number.py +++ b/maths/special_numbers/perfect_number.py @@ -25,6 +25,10 @@ def perfect(number: int) -> bool: Returns: True if the number is a perfect number, False otherwise. + Start from 1 because dividing by 0 will raise ZeroDivisionError. + A number at most can be divisible by the half of the number except the number + itself. For example, 6 is at most can be divisible by 3 except by 6 itself. 
+ Examples: >>> perfect(27) False @@ -41,15 +45,35 @@ def perfect(number: int) -> bool: >>> perfect(8128) True >>> perfect(0) - >>> perfect(-3) + False + >>> perfect(-1) + False >>> perfect(12.34) - >>> perfect("day") - >>> perfect(["call"]) + Traceback (most recent call last): + ... + ValueError: number must be an integer + >>> perfect("Hello") + Traceback (most recent call last): + ... + ValueError: number must be an integer """ + if not isinstance(number, int): + raise ValueError("number must be an integer") + if number <= 0: + return False return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number if __name__ == "__main__": + from doctest import testmod + + testmod() print("Program to check whether a number is a Perfect number or not...") - number = int(input("Enter number: ").strip()) + try: + number = int(input("Enter a positive integer: ").strip()) + except ValueError: + msg = "number must be an integer" + print(msg) + raise ValueError(msg) + print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.") diff --git a/neural_network/perceptron.py b/neural_network/perceptron.py.DISABLED similarity index 100% rename from neural_network/perceptron.py rename to neural_network/perceptron.py.DISABLED diff --git a/physics/doppler_frequency.py b/physics/doppler_frequency.py new file mode 100644 index 000000000..2a761c72d --- /dev/null +++ b/physics/doppler_frequency.py @@ -0,0 +1,104 @@ +""" +Doppler's effect + +The Doppler effect (also Doppler shift) is the change in the frequency of a wave in +relation to an observer who is moving relative to the source of the wave. The Doppler +effect is named after the physicist Christian Doppler. A common example of Doppler +shift is the change of pitch heard when a vehicle sounding a horn approaches and +recedes from an observer. 
+ +The reason for the Doppler effect is that when the source of the waves is moving +towards the observer, each successive wave crest is emitted from a position closer to +the observer than the crest of the previous wave. Therefore, each wave takes slightly +less time to reach the observer than the previous wave. Hence, the time between the +arrivals of successive wave crests at the observer is reduced, causing an increase in +the frequency. Similarly, if the source of waves is moving away from the observer, +each wave is emitted from a position farther from the observer than the previous wave, +so the arrival time between successive waves is increased, reducing the frequency. + +If the source of waves is stationary but the observer is moving with respect to the +source, the transmission velocity of the waves changes (ie the rate at which the +observer receives waves) even if the wavelength and frequency emitted from the source +remain constant. + +These results are all summarized by the Doppler formula: + + f = (f0 * (v + v0)) / (v - vs) + +where: + f: frequency of the wave + f0: frequency of the wave when the source is stationary + v: velocity of the wave in the medium + v0: velocity of the observer, positive if the observer is moving towards the source + vs: velocity of the source, positive if the source is moving towards the observer + +Doppler's effect has many applications in physics and engineering, such as radar, +astronomy, medical imaging, and seismology. + +References: +https://en.wikipedia.org/wiki/Doppler_effect + +Now, we will implement a function that calculates the frequency of a wave as a function +of the frequency of the wave when the source is stationary, the velocity of the wave +in the medium, the velocity of the observer and the velocity of the source. 
+""" + + +def doppler_effect( + org_freq: float, wave_vel: float, obs_vel: float, src_vel: float +) -> float: + """ + Input Parameters: + ----------------- + org_freq: frequency of the wave when the source is stationary + wave_vel: velocity of the wave in the medium + obs_vel: velocity of the observer, +ve if the observer is moving towards the source + src_vel: velocity of the source, +ve if the source is moving towards the observer + + Returns: + -------- + f: frequency of the wave as perceived by the observer + + Docstring Tests: + >>> doppler_effect(100, 330, 10, 0) # observer moving towards the source + 103.03030303030303 + >>> doppler_effect(100, 330, -10, 0) # observer moving away from the source + 96.96969696969697 + >>> doppler_effect(100, 330, 0, 10) # source moving towards the observer + 103.125 + >>> doppler_effect(100, 330, 0, -10) # source moving away from the observer + 97.05882352941177 + >>> doppler_effect(100, 330, 10, 10) # source & observer moving towards each other + 106.25 + >>> doppler_effect(100, 330, -10, -10) # source and observer moving away + 94.11764705882354 + >>> doppler_effect(100, 330, 10, 330) # source moving at same speed as the wave + Traceback (most recent call last): + ... + ZeroDivisionError: Division by zero implies vs=v and observer in front of the source + >>> doppler_effect(100, 330, 10, 340) # source moving faster than the wave + Traceback (most recent call last): + ... + ValueError: Non-positive frequency implies vs>v or v0>v (in the opposite direction) + >>> doppler_effect(100, 330, -340, 10) # observer moving faster than the wave + Traceback (most recent call last): + ... 
+ ValueError: Non-positive frequency implies vs>v or v0>v (in the opposite direction) + """ + + if wave_vel == src_vel: + raise ZeroDivisionError( + "Division by zero implies vs=v and observer in front of the source" + ) + doppler_freq = (org_freq * (wave_vel + obs_vel)) / (wave_vel - src_vel) + if doppler_freq <= 0: + raise ValueError( + "Non-positive frequency implies vs>v or v0>v (in the opposite direction)" + ) + return doppler_freq + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c92e86bd7950b443fe39ccb19b587df44feaa068 Mon Sep 17 00:00:00 2001 From: "Precious C. Jacob" <72174492+PreciousJac0b@users.noreply.github.com> Date: Sun, 22 Oct 2023 00:33:49 +0100 Subject: [PATCH 192/306] Add tests to data_structures/linked_list/swap_nodes.py (#10751) * Added doctests to the swap_nodes file under linkedlist data structure * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added doctests to the swap_nodes file under linkedlist data structure * Added doctests to the swap_nodes file under linkedlist data structure * Added doctests to the swap_nodes file under linkedlist data structure * Update swap_nodes.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/linked_list/swap_nodes.py | 156 +++++++++++++++------- 1 file changed, 110 insertions(+), 46 deletions(-) diff --git a/data_structures/linked_list/swap_nodes.py b/data_structures/linked_list/swap_nodes.py index 31dcb02bf..d66512087 100644 --- a/data_structures/linked_list/swap_nodes.py +++ b/data_structures/linked_list/swap_nodes.py @@ -1,49 +1,73 @@ +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass from typing import Any +@dataclass class Node: - def __init__(self, data: Any) -> None: - """ - Initialize a new Node with the given data. 
- - Args: - data: The data to be stored in the node. - - """ - self.data = data - self.next: Node | None = None # Reference to the next node + data: Any + next_node: Node | None = None +@dataclass class LinkedList: - def __init__(self) -> None: - """ - Initialize an empty Linked List. - """ - self.head: Node | None = None # Reference to the head (first node) + head: Node | None = None - def print_list(self): + def __iter__(self) -> Iterator: """ - Print the elements of the Linked List in order. + >>> linked_list = LinkedList() + >>> list(linked_list) + [] + >>> linked_list.push(0) + >>> tuple(linked_list) + (0,) """ - temp = self.head - while temp is not None: - print(temp.data, end=" ") - temp = temp.next - print() + node = self.head + while node: + yield node.data + node = node.next_node + + def __len__(self) -> int: + """ + >>> linked_list = LinkedList() + >>> len(linked_list) + 0 + >>> linked_list.push(0) + >>> len(linked_list) + 1 + """ + return sum(1 for _ in self) def push(self, new_data: Any) -> None: """ Add a new node with the given data to the beginning of the Linked List. + Args: new_data (Any): The data to be added to the new node. + + Returns: + None + + Examples: + >>> linked_list = LinkedList() + >>> linked_list.push(5) + >>> linked_list.push(4) + >>> linked_list.push(3) + >>> linked_list.push(2) + >>> linked_list.push(1) + >>> list(linked_list) + [1, 2, 3, 4, 5] """ new_node = Node(new_data) - new_node.next = self.head + new_node.next_node = self.head self.head = new_node - def swap_nodes(self, node_data_1, node_data_2) -> None: + def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None: """ Swap the positions of two nodes in the Linked List based on their data values. + Args: node_data_1: Data value of the first node to be swapped. node_data_2: Data value of the second node to be swapped. @@ -51,34 +75,74 @@ class LinkedList: Note: If either of the specified data values isn't found then, no swapping occurs. 
+ + Examples: + When both values are present in a linked list. + >>> linked_list = LinkedList() + >>> linked_list.push(5) + >>> linked_list.push(4) + >>> linked_list.push(3) + >>> linked_list.push(2) + >>> linked_list.push(1) + >>> list(linked_list) + [1, 2, 3, 4, 5] + >>> linked_list.swap_nodes(1, 5) + >>> tuple(linked_list) + (5, 2, 3, 4, 1) + + When one value is present and the other isn't in the linked list. + >>> second_list = LinkedList() + >>> second_list.push(6) + >>> second_list.push(7) + >>> second_list.push(8) + >>> second_list.push(9) + >>> second_list.swap_nodes(1, 6) is None + True + + When both values are absent in the linked list. + >>> second_list = LinkedList() + >>> second_list.push(10) + >>> second_list.push(9) + >>> second_list.push(8) + >>> second_list.push(7) + >>> second_list.swap_nodes(1, 3) is None + True + + When linkedlist is empty. + >>> second_list = LinkedList() + >>> second_list.swap_nodes(1, 3) is None + True + + Returns: + None """ if node_data_1 == node_data_2: return - else: - node_1 = self.head - while node_1 is not None and node_1.data != node_data_1: - node_1 = node_1.next - node_2 = self.head - while node_2 is not None and node_2.data != node_data_2: - node_2 = node_2.next - - if node_1 is None or node_2 is None: - return - - # Swap the data values of the two nodes - node_1.data, node_2.data = node_2.data, node_1.data + node_1 = self.head + while node_1 and node_1.data != node_data_1: + node_1 = node_1.next_node + node_2 = self.head + while node_2 and node_2.data != node_data_2: + node_2 = node_2.next_node + if node_1 is None or node_2 is None: + return + # Swap the data values of the two nodes + node_1.data, node_2.data = node_2.data, node_1.data if __name__ == "__main__": - ll = LinkedList() + """ + Python script that outputs the swap of nodes in a linked list. 
+ """ + from doctest import testmod + + testmod() + linked_list = LinkedList() for i in range(5, 0, -1): - ll.push(i) + linked_list.push(i) - print("Original Linked List:") - ll.print_list() - - ll.swap_nodes(1, 4) - print("After swapping the nodes whose data is 1 and 4:") - - ll.print_list() + print(f"Original Linked List: {list(linked_list)}") + linked_list.swap_nodes(1, 4) + print(f"Modified Linked List: {list(linked_list)}") + print("After swapping the nodes whose data is 1 and 4.") From d73a4c2ee035698de437086230985574766f195b Mon Sep 17 00:00:00 2001 From: santiditomas <72716997+santiditomas@users.noreply.github.com> Date: Sat, 21 Oct 2023 20:59:41 -0300 Subject: [PATCH 193/306] adding new physics algorithm: center of mass (#10743) * adding new physics algorithm: center of mass * Add changes requested by the reviewer * Add changes requested by the reviewer * Update center_of_mass.py * Update center_of_mass.py --------- Co-authored-by: Christian Clauss --- physics/center_of_mass.py | 109 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 physics/center_of_mass.py diff --git a/physics/center_of_mass.py b/physics/center_of_mass.py new file mode 100644 index 000000000..bd9ba2480 --- /dev/null +++ b/physics/center_of_mass.py @@ -0,0 +1,109 @@ +""" +Calculating the center of mass for a discrete system of particles, given their +positions and masses. + +Description: + +In physics, the center of mass of a distribution of mass in space (sometimes referred +to as the barycenter or balance point) is the unique point at any given time where the +weighted relative position of the distributed mass sums to zero. This is the point to +which a force may be applied to cause a linear acceleration without an angular +acceleration. + +Calculations in mechanics are often simplified when formulated with respect to the +center of mass. 
It is a hypothetical point where the entire mass of an object may be +assumed to be concentrated to visualize its motion. In other words, the center of mass +is the particle equivalent of a given object for the application of Newton's laws of +motion. + +In the case of a system of particles P_i, i = 1, ..., n , each with mass m_i that are +located in space with coordinates r_i, i = 1, ..., n , the coordinates R of the center +of mass corresponds to: + +R = (Σ(mi * ri) / Σ(mi)) + +Reference: https://en.wikipedia.org/wiki/Center_of_mass +""" +from collections import namedtuple + +Particle = namedtuple("Particle", "x y z mass")  # noqa: PYI024 +Coord3D = namedtuple("Coord3D", "x y z")  # noqa: PYI024 + + +def center_of_mass(particles: list[Particle]) -> Coord3D: + """ + Input Parameters + ---------------- + particles: list(Particle): + A list of particles where each particle is a tuple with its (x, y, z) position and + its mass. + + Returns + ------- + Coord3D: + A tuple with the coordinates of the center of mass (Xcm, Ycm, Zcm) rounded to two + decimal places. + + Examples + -------- + >>> center_of_mass([ + ... Particle(1.5, 4, 3.4, 4), + ... Particle(5, 6.8, 7, 8.1), + ... Particle(9.4, 10.1, 11.6, 12) + ... ]) + Coord3D(x=6.61, y=7.98, z=8.69) + + >>> center_of_mass([ + ... Particle(1, 2, 3, 4), + ... Particle(5, 6, 7, 8), + ... Particle(9, 10, 11, 12) + ... ]) + Coord3D(x=6.33, y=7.33, z=8.33) + + >>> center_of_mass([ + ... Particle(1, 2, 3, -4), + ... Particle(5, 6, 7, 8), + ... Particle(9, 10, 11, 12) + ... ]) + Traceback (most recent call last): + ... + ValueError: Mass of all particles must be greater than 0 + + >>> center_of_mass([ + ... Particle(1, 2, 3, 0), + ... Particle(5, 6, 7, 8), + ... Particle(9, 10, 11, 12) + ... ]) + Traceback (most recent call last): + ... + ValueError: Mass of all particles must be greater than 0 + + >>> center_of_mass([]) + Traceback (most recent call last): + ...
+ ValueError: No particles provided + """ + if not particles: + raise ValueError("No particles provided") + + if any(particle.mass <= 0 for particle in particles): + raise ValueError("Mass of all particles must be greater than 0") + + total_mass = sum(particle.mass for particle in particles) + + center_of_mass_x = round( + sum(particle.x * particle.mass for particle in particles) / total_mass, 2 + ) + center_of_mass_y = round( + sum(particle.y * particle.mass for particle in particles) / total_mass, 2 + ) + center_of_mass_z = round( + sum(particle.z * particle.mass for particle in particles) / total_mass, 2 + ) + return Coord3D(center_of_mass_x, center_of_mass_y, center_of_mass_z) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0601b56173021fe96fb070d20085962b036e85c3 Mon Sep 17 00:00:00 2001 From: gio-puter <103840942+gio-puter@users.noreply.github.com> Date: Sat, 21 Oct 2023 22:42:26 -0700 Subject: [PATCH 194/306] Add tests without modifying code (#10740) * Contributes to #9943 Added doctest to largest_of_very_large_numbers.py Added doctest to word_patterns.py Added doctest to onepad_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Contributes to #9943 Added doctest to maths/largest_of_very_large_numbers.py Added doctest to strings/word_patterns.py Added doctest to ciphers/onepad_cipher.py * Add tests without modifying code #10740 Added test to maths/largest_of_very_large_numbers Added test to strings/word_patterns.py Added test to ciphers/onepad_cipher.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ciphers/onepad_cipher.py | 37 ++++++++++++++++++++++++-- maths/largest_of_very_large_numbers.py | 13 +++++++++ strings/word_patterns.py | 21 +++++++++++++++ 3 files changed, 69 insertions(+), 2 deletions(-) diff --git a/ciphers/onepad_cipher.py b/ciphers/onepad_cipher.py index 4bfe35b71..c4fb22e14 100644 --- 
a/ciphers/onepad_cipher.py +++ b/ciphers/onepad_cipher.py @@ -4,7 +4,27 @@ import random class Onepad: @staticmethod def encrypt(text: str) -> tuple[list[int], list[int]]: - """Function to encrypt text using pseudo-random numbers""" + """ + Function to encrypt text using pseudo-random numbers + >>> Onepad().encrypt("") + ([], []) + >>> Onepad().encrypt([]) + ([], []) + >>> random.seed(1) + >>> Onepad().encrypt(" ") + ([6969], [69]) + >>> random.seed(1) + >>> Onepad().encrypt("Hello") + ([9729, 114756, 4653, 31309, 10492], [69, 292, 33, 131, 61]) + >>> Onepad().encrypt(1) + Traceback (most recent call last): + ... + TypeError: 'int' object is not iterable + >>> Onepad().encrypt(1.1) + Traceback (most recent call last): + ... + TypeError: 'float' object is not iterable + """ plain = [ord(i) for i in text] key = [] cipher = [] @@ -17,7 +37,20 @@ class Onepad: @staticmethod def decrypt(cipher: list[int], key: list[int]) -> str: - """Function to decrypt text using pseudo-random numbers.""" + """ + Function to decrypt text using pseudo-random numbers. + >>> Onepad().decrypt([], []) + '' + >>> Onepad().decrypt([35], []) + '' + >>> Onepad().decrypt([], [35]) + Traceback (most recent call last): + ... + IndexError: list index out of range + >>> random.seed(1) + >>> Onepad().decrypt([9729, 114756, 4653, 31309, 10492], [69, 292, 33, 131, 61]) + 'Hello' + """ plain = [] for i in range(len(key)): p = int((cipher[i] - (key[i]) ** 2) / key[i]) diff --git a/maths/largest_of_very_large_numbers.py b/maths/largest_of_very_large_numbers.py index 7e7fea004..eb5c121fd 100644 --- a/maths/largest_of_very_large_numbers.py +++ b/maths/largest_of_very_large_numbers.py @@ -4,6 +4,19 @@ import math def res(x, y): + """ + Reduces large number to a more manageable number + >>> res(5, 7) + 4.892790030352132 + >>> res(0, 5) + 0 + >>> res(3, 0) + 1 + >>> res(-1, 5) + Traceback (most recent call last): + ... 
+ ValueError: math domain error + """ if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.log10(x) diff --git a/strings/word_patterns.py b/strings/word_patterns.py index d12d267e7..ed603e9fe 100644 --- a/strings/word_patterns.py +++ b/strings/word_patterns.py @@ -1,11 +1,32 @@ def get_word_pattern(word: str) -> str: """ + Returns numerical pattern of character appearances in given word + >>> get_word_pattern("") + '' + >>> get_word_pattern(" ") + '0' >>> get_word_pattern("pattern") '0.1.2.2.3.4.5' >>> get_word_pattern("word pattern") '0.1.2.3.4.5.6.7.7.8.2.9' >>> get_word_pattern("get word pattern") '0.1.2.3.4.5.6.7.3.8.9.2.2.1.6.10' + >>> get_word_pattern() + Traceback (most recent call last): + ... + TypeError: get_word_pattern() missing 1 required positional argument: 'word' + >>> get_word_pattern(1) + Traceback (most recent call last): + ... + AttributeError: 'int' object has no attribute 'upper' + >>> get_word_pattern(1.1) + Traceback (most recent call last): + ... + AttributeError: 'float' object has no attribute 'upper' + >>> get_word_pattern([]) + Traceback (most recent call last): + ... 
+ AttributeError: 'list' object has no attribute 'upper' """ word = word.upper() next_num = 0 From 7d0f6e012acb42271652f9a398675305b7e270d2 Mon Sep 17 00:00:00 2001 From: Kento <75509362+nkstonks@users.noreply.github.com> Date: Sun, 22 Oct 2023 20:08:08 +1100 Subject: [PATCH 195/306] Updated doctests for nor_gate (#10791) * added other possible cases * added test for correct output of truth table * updating DIRECTORY.md * Update nor_gate.py --------- Co-authored-by: = <=> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 6 ++-- boolean_algebra/nor_gate.py | 55 +++++++++++++++++++++++++------------ 2 files changed, 41 insertions(+), 20 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 9e0166ad8..c37c4f99b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -541,7 +541,7 @@ * [Dimensionality Reduction](machine_learning/dimensionality_reduction.py) * Forecasting * [Run](machine_learning/forecasting/run.py) - * [Frequent Pattern Growth Algorithm](machine_learning/frequent_pattern_growth.py) + * [Frequent Pattern Growth](machine_learning/frequent_pattern_growth.py) * [Gradient Descent](machine_learning/gradient_descent.py) * [K Means Clust](machine_learning/k_means_clust.py) * [K Nearest Neighbours](machine_learning/k_nearest_neighbours.py) @@ -649,6 +649,7 @@ * [Numerical Integration](maths/numerical_integration.py) * [Odd Sieve](maths/odd_sieve.py) * [Perfect Cube](maths/perfect_cube.py) + * [Perfect Number](maths/perfect_number.py) * [Perfect Square](maths/perfect_square.py) * [Persistence](maths/persistence.py) * [Pi Generator](maths/pi_generator.py) @@ -767,7 +768,6 @@ * [Swish](neural_network/activation_functions/swish.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) - * [Perceptron](neural_network/perceptron.py) * [Simple Neural 
Network](neural_network/simple_neural_network.py) ## Other @@ -803,8 +803,10 @@ * [Archimedes Principle Of Buoyant Force](physics/archimedes_principle_of_buoyant_force.py) * [Basic Orbital Capture](physics/basic_orbital_capture.py) * [Casimir Effect](physics/casimir_effect.py) + * [Center Of Mass](physics/center_of_mass.py) * [Centripetal Force](physics/centripetal_force.py) * [Coulombs Law](physics/coulombs_law.py) + * [Doppler Frequency](physics/doppler_frequency.py) * [Grahams Law](physics/grahams_law.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Hubble Parameter](physics/hubble_parameter.py) diff --git a/boolean_algebra/nor_gate.py b/boolean_algebra/nor_gate.py index 2c27b80af..0c8ab1c0a 100644 --- a/boolean_algebra/nor_gate.py +++ b/boolean_algebra/nor_gate.py @@ -1,15 +1,18 @@ """ -A NOR Gate is a logic gate in boolean algebra which results to false(0) -if any of the input is 1, and True(1) if both the inputs are 0. +A NOR Gate is a logic gate in boolean algebra which results in false(0) if any of the +inputs is 1, and True(1) if all inputs are 0. 
Following is the truth table of a NOR Gate: - | Input 1 | Input 2 | Output | - | 0 | 0 | 1 | - | 0 | 1 | 0 | - | 1 | 0 | 0 | - | 1 | 1 | 0 | + Truth Table of NOR Gate: + | Input 1 | Input 2 | Output | + | 0 | 0 | 1 | + | 0 | 1 | 0 | + | 1 | 0 | 0 | + | 1 | 1 | 0 | -Following is the code implementation of the NOR Gate + Code provided by Akshaj Vishwanathan +https://www.geeksforgeeks.org/logic-gates-in-python """ +from collections.abc import Callable def nor_gate(input_1: int, input_2: int) -> int: @@ -30,19 +33,35 @@ def nor_gate(input_1: int, input_2: int) -> int: return int(input_1 == input_2 == 0) -def main() -> None: - print("Truth Table of NOR Gate:") - print("| Input 1 | Input 2 | Output |") - print(f"| 0 | 0 | {nor_gate(0, 0)} |") - print(f"| 0 | 1 | {nor_gate(0, 1)} |") - print(f"| 1 | 0 | {nor_gate(1, 0)} |") - print(f"| 1 | 1 | {nor_gate(1, 1)} |") +def truth_table(func: Callable) -> str: + """ + >>> print(truth_table(nor_gate)) + Truth Table of NOR Gate: + | Input 1 | Input 2 | Output | + | 0 | 0 | 1 | + | 0 | 1 | 0 | + | 1 | 0 | 0 | + | 1 | 1 | 0 | + """ + + def make_table_row(items: list | tuple) -> str: + """ + >>> make_table_row(("One", "Two", "Three")) + '| One | Two | Three |' + """ + return f"| {' | '.join(f'{item:^8}' for item in items)} |" + + return "\n".join( + ( + "Truth Table of NOR Gate:", + make_table_row(("Input 1", "Input 2", "Output")), + *[make_table_row((i, j, func(i, j))) for i in (0, 1) for j in (0, 1)], + ) + ) if __name__ == "__main__": import doctest doctest.testmod() - main() -"""Code provided by Akshaj Vishwanathan""" -"""Reference: https://www.geeksforgeeks.org/logic-gates-in-python/""" + print(truth_table(nor_gate)) From 6c8743f1e62c785e58a45f785b380f27693aadf9 Mon Sep 17 00:00:00 2001 From: Jeel Gajera <83470656+JeelGajera@users.noreply.github.com> Date: Sun, 22 Oct 2023 19:21:30 +0530 Subject: [PATCH 196/306] Add: Time Conversion Function (#10749) * Add: Time Conversion Function * [pre-commit.ci] auto fixes from 
"""
A unit of time is any particular time interval, used as a standard way of measuring or
expressing duration. The base unit of time in the International System of Units (SI),
and by extension most of the Western world, is the second, defined as about 9 billion
oscillations of the caesium atom.

https://en.wikipedia.org/wiki/Unit_of_time
"""

# Size of each supported unit, expressed in seconds.
time_chart: dict[str, float] = {
    "seconds": 1.0,
    "minutes": 60.0,  # 1 minute = 60 sec
    "hours": 3600.0,  # 1 hour = 60 minutes = 3600 seconds
    "days": 86400.0,  # 1 day = 24 hours = 1440 min = 86400 sec
    "weeks": 604800.0,  # 1 week=7d=168hr=10080min = 604800 sec
    "months": 2629800.0,  # Approximate value for a month in seconds
    "years": 31557600.0,  # Approximate value for a year in seconds
}

# Reciprocals of the table above, so converting is a single multiplication.
time_chart_inverse: dict[str, float] = {
    key: 1 / value for key, value in time_chart.items()
}


def convert_time(time_value: float, unit_from: str, unit_to: str) -> float:
    """
    Convert a duration between any two units listed in ``time_chart``.

    Unit names are case-insensitive and the result is rounded to 3 decimals.

    >>> convert_time(3600, "seconds", "hours")
    1.0
    >>> convert_time(3500, "Seconds", "Hours")
    0.972
    >>> convert_time(1, "DaYs", "hours")
    24.0
    >>> convert_time(120, "minutes", "SeCoNdS")
    7200.0
    >>> convert_time(2, "WEEKS", "days")
    14.0
    >>> convert_time(0.5, "hours", "MINUTES")
    30.0
    >>> convert_time(-3600, "seconds", "hours")
    Traceback (most recent call last):
        ...
    ValueError: 'time_value' must be a non-negative number.
    >>> convert_time("Hello", "hours", "minutes")
    Traceback (most recent call last):
        ...
    ValueError: 'time_value' must be a non-negative number.
    >>> convert_time(1, "cool", "century")  # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    ValueError: Invalid unit cool is not in seconds, minutes, hours, days, weeks, ...
    >>> convert_time(1, "seconds", "hot")  # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    ValueError: Invalid unit hot is not in seconds, minutes, hours, days, weeks, ...
    """
    if not isinstance(time_value, (int, float)) or time_value < 0:
        raise ValueError("'time_value' must be a non-negative number.")

    source, target = unit_from.lower(), unit_to.lower()
    # Report the first unknown unit, source before target.
    for unit in (source, target):
        if unit not in time_chart:
            raise ValueError(f"Invalid unit {unit} is not in {', '.join(time_chart)}.")

    # Convert to seconds, then to the target unit (multiply by the reciprocal).
    return round(time_value * time_chart[source] * time_chart_inverse[target], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{convert_time(3600,'seconds', 'hours') = :,}")
    print(f"{convert_time(360, 'days', 'months') = :,}")
    print(f"{convert_time(360, 'months', 'years') = :,}")
    print(f"{convert_time(1, 'years', 'seconds') = :,}")
- Parameters : - board(2D matrix) : board - row ,column : coordinates of the cell on a board + Parameters: + board (2D matrix): The chessboard + row, column: Coordinates of the cell on the board - Returns : + Returns: Boolean Value """ - for i in range(len(board)): - if board[row][i] == 1: - return False - for i in range(len(board)): - if board[i][column] == 1: - return False - for i, j in zip(range(row, -1, -1), range(column, -1, -1)): - if board[i][j] == 1: - return False - for i, j in zip(range(row, -1, -1), range(column, len(board))): - if board[i][j] == 1: - return False - return True + + n = len(board) # Size of the board + + # Check if there is any queen in the same row, column, + # left upper diagonal, and right upper diagonal + return ( + all(board[i][j] != 1 for i, j in zip(range(row, -1, -1), range(column, n))) + and all( + board[i][j] != 1 for i, j in zip(range(row, -1, -1), range(column, -1, -1)) + ) + and all(board[i][j] != 1 for i, j in zip(range(row, n), range(column, n))) + and all(board[i][j] != 1 for i, j in zip(range(row, n), range(column, -1, -1))) + ) def solve(board: list[list[int]], row: int) -> bool: """ - It creates a state space tree and calls the safe function until it receives a - False Boolean and terminates that branch and backtracks to the next + This function creates a state space tree and calls the safe function until it + receives a False Boolean and terminates that branch and backtracks to the next possible solution branch. """ if row >= len(board): """ - If the row number exceeds N we have board with a successful combination + If the row number exceeds N, we have a board with a successful combination and that combination is appended to the solution list and the board is printed. 
- """ solution.append(board) printboard(board) @@ -58,9 +57,9 @@ def solve(board: list[list[int]], row: int) -> bool: return True for i in range(len(board)): """ - For every row it iterates through each column to check if it is feasible to + For every row, it iterates through each column to check if it is feasible to place a queen there. - If all the combinations for that particular branch are successful the board is + If all the combinations for that particular branch are successful, the board is reinitialized for the next possible combination. """ if is_safe(board, row, i): @@ -77,14 +76,14 @@ def printboard(board: list[list[int]]) -> None: for i in range(len(board)): for j in range(len(board)): if board[i][j] == 1: - print("Q", end=" ") + print("Q", end=" ") # Queen is present else: - print(".", end=" ") + print(".", end=" ") # Empty cell print() -# n=int(input("The no. of queens")) +# Number of queens (e.g., n=8 for an 8x8 board) n = 8 board = [[0 for i in range(n)] for j in range(n)] solve(board, 0) -print("The total no. 
of solutions are :", len(solution)) +print("The total number of solutions are:", len(solution)) From fdb0635c71318da758fafcda80154d03dbbd5c5a Mon Sep 17 00:00:00 2001 From: Anshu Sharma <142900182+AnshuSharma111@users.noreply.github.com> Date: Mon, 23 Oct 2023 03:09:31 +0530 Subject: [PATCH 198/306] added doctest to playfair_cipher.py (#10823) * added doctest to playfair_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added newline to EOF andremoved trailing whitespace * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review --------- Co-authored-by: Keyboard-1 <142900182+Keyboard-1@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- ciphers/playfair_cipher.py | 59 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 2 deletions(-) diff --git a/ciphers/playfair_cipher.py b/ciphers/playfair_cipher.py index 7279fb23e..86b45bc4f 100644 --- a/ciphers/playfair_cipher.py +++ b/ciphers/playfair_cipher.py @@ -1,3 +1,24 @@ +""" +https://en.wikipedia.org/wiki/Playfair_cipher#Description + +The Playfair cipher was developed by Charles Wheatstone in 1854 +It's use was heavily promotedby Lord Playfair, hence its name + +Some features of the Playfair cipher are: + +1) It was the first literal diagram substitution cipher +2) It is a manual symmetric encryption technique +3) It is a multiple letter encryption cipher + +The implementation in the code below encodes alphabets only. +It removes spaces, special characters and numbers from the +code. + +Playfair is no longer used by military forces because of known +insecurities and of the advent of automated encryption devices. +This cipher is regarded as insecure since before World War I. 
+""" + import itertools import string from collections.abc import Generator, Iterable @@ -60,11 +81,26 @@ def generate_table(key: str) -> list[str]: def encode(plaintext: str, key: str) -> str: + """ + Encode the given plaintext using the Playfair cipher. + Takes the plaintext and the key as input and returns the encoded string. + + >>> encode("Hello", "MONARCHY") + 'CFSUPM' + >>> encode("attack on the left flank", "EMERGENCY") + 'DQZSBYFSDZFMFNLOHFDRSG' + >>> encode("Sorry!", "SPECIAL") + 'AVXETX' + >>> encode("Number 1", "NUMBER") + 'UMBENF' + >>> encode("Photosynthesis!", "THE SUN") + 'OEMHQHVCHESUKE' + """ + table = generate_table(key) plaintext = prepare_input(plaintext) ciphertext = "" - # https://en.wikipedia.org/wiki/Playfair_cipher#Description for char1, char2 in chunker(plaintext, 2): row1, col1 = divmod(table.index(char1), 5) row2, col2 = divmod(table.index(char2), 5) @@ -83,10 +119,20 @@ def encode(plaintext: str, key: str) -> str: def decode(ciphertext: str, key: str) -> str: + """ + Decode the input string using the provided key. 
def binary_focal_cross_entropy(
    y_true: np.ndarray,
    y_pred: np.ndarray,
    gamma: float = 2.0,
    alpha: float = 0.25,
    epsilon: float = 1e-15,
) -> float:
    """
    Mean binary focal cross-entropy (BFCE) loss between true labels (0 or 1)
    and predicted probabilities.

    BFCE is a class-imbalance-aware variant of binary cross-entropy that
    down-weights easy examples so training focuses on hard ones:

    BCFE = -Σ(alpha * (1 - y_pred)**gamma * y_true * log(y_pred)
              + (1 - alpha) * y_pred**gamma * (1 - y_true) * log(1 - y_pred))

    Reference: [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf)

    Parameters:
    - y_true: True binary labels (0 or 1).
    - y_pred: Predicted probabilities for class 1.
    - gamma: Focusing parameter for modulating the loss (default: 2.0).
    - alpha: Weighting factor for class 1 (default: 0.25).
    - epsilon: Small constant to avoid numerical instability.

    >>> true_labels = np.array([0, 1, 1, 0, 1])
    >>> predicted_probs = np.array([0.2, 0.7, 0.9, 0.3, 0.8])
    >>> binary_focal_cross_entropy(true_labels, predicted_probs)
    0.008257977659239775
    >>> true_labels = np.array([0, 1, 1, 0, 1])
    >>> predicted_probs = np.array([0.3, 0.8, 0.9, 0.2])
    >>> binary_focal_cross_entropy(true_labels, predicted_probs)
    Traceback (most recent call last):
        ...
    ValueError: Input arrays must have the same length.
    """
    if len(y_true) != len(y_pred):
        raise ValueError("Input arrays must have the same length.")

    # Clip predictions away from exactly 0 and 1 so the logs stay finite.
    y_pred = np.clip(y_pred, epsilon, 1 - epsilon)

    # Contributions of the positive (y=1) and negative (y=0) classes.
    positive_term = alpha * (1 - y_pred) ** gamma * y_true * np.log(y_pred)
    negative_term = (1 - alpha) * y_pred**gamma * (1 - y_true) * np.log(1 - y_pred)

    return np.mean(-(positive_term + negative_term))
+ + Reference: https://en.wikipedia.org/wiki/Sigmoid_function + + @param z: input to the function + @returns: returns value in the range 0 to 1 + """ return 1 / (1 + np.exp(-z)) From 68faebe711899bf6072ceedb16ccf1fbdc7d2434 Mon Sep 17 00:00:00 2001 From: Pratik Tripathy <117454569+SilverDragonOfR@users.noreply.github.com> Date: Mon, 23 Oct 2023 11:05:10 +0530 Subject: [PATCH 201/306] feat: Add mass energy equivalence in physics and doctests (#10202) * updating DIRECTORY.md * feat: Add mass energy equivalence in physics * updating DIRECTORY.md * updating DIRECTORY.md * Apply suggestions from code review * Update physics/mass_energy_equivalence.py * Update mass_energy_equivalence.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- DIRECTORY.md | 1 + physics/mass_energy_equivalence.py | 77 ++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+) create mode 100644 physics/mass_energy_equivalence.py diff --git a/DIRECTORY.md b/DIRECTORY.md index f45102ae1..c07e1550d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -815,6 +815,7 @@ * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [Malus Law](physics/malus_law.py) + * [Mass Energy Equivalence](physics/mass_energy_equivalence.py) * [Mirror Formulae](physics/mirror_formulae.py) * [N Body Simulation](physics/n_body_simulation.py) * [Newtons Law Of Gravitation](physics/newtons_law_of_gravitation.py) diff --git a/physics/mass_energy_equivalence.py b/physics/mass_energy_equivalence.py new file mode 100644 index 000000000..4a4c7890f --- /dev/null +++ b/physics/mass_energy_equivalence.py @@ -0,0 +1,77 @@ +""" +Title: +Finding the energy equivalence of mass and mass equivalence of energy +by Einstein's equation. + +Description: +Einstein's mass-energy equivalence is a pivotal concept in theoretical physics. 
from scipy.constants import c  # speed of light in vacuum (299792458 m/s)


def energy_from_mass(mass: float) -> float:
    """
    Calculates the Energy equivalence of the Mass using E = mc²
    in SI units J from Mass in kg.

    mass (float): Mass of body.

    Usage example:
    >>> energy_from_mass(124.56)
    1.11948945063458e+19
    >>> energy_from_mass(320)
    2.8760165719578165e+19
    >>> energy_from_mass(0)
    0.0
    >>> energy_from_mass(-967.9)
    Traceback (most recent call last):
        ...
    ValueError: Mass can't be negative.

    """
    if mass < 0:
        raise ValueError("Mass can't be negative.")
    return mass * c**2


def mass_from_energy(energy: float) -> float:
    """
    Calculates the Mass equivalence of the Energy using m = E/c²
    in SI units kg from Energy in J.

    energy (float): Energy of body.

    Usage example:
    >>> mass_from_energy(124.56)
    1.3859169098203872e-15
    >>> mass_from_energy(320)
    3.560480179371579e-15
    >>> mass_from_energy(0)
    0.0
    >>> mass_from_energy(-967.9)
    Traceback (most recent call last):
        ...
    ValueError: Energy can't be negative.

    """
    # Doc fix: the parameter line used to say "Mass of body" (copy-paste from
    # energy_from_mass); this function takes an energy in joules.
    if energy < 0:
        raise ValueError("Energy can't be negative.")
    return energy / c**2
from scipy.constants import g


def terminal_velocity(
    mass: float, density: float, area: float, drag_coefficient: float
) -> float:
    """
    Terminal velocity of an object falling through a fluid, in m/s:

        Vt = ((2 * m * g) / (ρ * A * Cd)) ^ 0.5

    mass: mass of the falling object (kg)
    density: density of the fluid the object falls through (kg/m^3)
    area: projected area of the object (m^2)
    drag_coefficient: drag coefficient (dimensionless)
    g is the standard acceleration due to gravity from scipy.constants.

    >>> terminal_velocity(1, 25, 0.6, 0.77)
    1.3031197996044768
    >>> terminal_velocity(2, 100, 0.45, 0.23)
    1.9467947148674276
    >>> terminal_velocity(5, 50, 0.2, 0.5)
    4.428690551393267
    >>> terminal_velocity(-5, 50, -0.2, -2)
    Traceback (most recent call last):
        ...
    ValueError: mass, density, area and the drag coefficient all need to be positive
    >>> terminal_velocity(3, -20, -1, 2)
    Traceback (most recent call last):
        ...
    ValueError: mass, density, area and the drag coefficient all need to be positive
    """
    if any(value <= 0 for value in (mass, density, area, drag_coefficient)):
        raise ValueError(
            "mass, density, area and the drag coefficient all need to be positive"
        )
    # Gravity pulls the numerator; drag and buoyancy-related terms the denominator.
    numerator = 2 * mass * g
    denominator = density * area * drag_coefficient
    return (numerator / denominator) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
+ TypeError: perfect_cube_binary_search() only accepts integers + """ + if not isinstance(n, int): + raise TypeError("perfect_cube_binary_search() only accepts integers") + if n < 0: + n = -n + left = 0 + right = n + while left <= right: + mid = left + (right - left) // 2 + if mid * mid * mid == n: + return True + elif mid * mid * mid < n: + left = mid + 1 + else: + right = mid - 1 + return False + + if __name__ == "__main__": - print(perfect_cube(27)) - print(perfect_cube(4)) + import doctest + + doctest.testmod() From a8b6bda993484b3be9fd541a10dd9ac9c4111dda Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 23 Oct 2023 03:31:30 -0400 Subject: [PATCH 204/306] Delete `arithmetic_analysis/` directory and relocate its contents (#10824) * Remove eval from arithmetic_analysis/newton_raphson.py * Relocate contents of arithmetic_analysis/ Delete the arithmetic_analysis/ directory and relocate its files because the purpose of the directory was always ill-defined. "Arithmetic analysis" isn't a field of math, and the directory's files contained algorithms for linear algebra, numerical analysis, and physics. Relocated the directory's linear algebra algorithms to linear_algebra/, its numerical analysis algorithms to a new subdirectory called maths/numerical_analysis/, and its single physics algorithm to physics/. 
* updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 43 +- arithmetic_analysis/README.md | 7 - arithmetic_analysis/image_data/__init__.py | 0 .../gaussian_elimination.py | 0 .../jacobi_iteration_method.py | 406 +++++++++--------- .../lu_decomposition.py | 0 .../numerical_analysis}/bisection.py | 0 .../bisection_2.py} | 0 .../integration_by_simpson_approx.py | 0 .../numerical_analysis}/intersection.py | 0 .../nevilles_method.py | 0 .../newton_forward_interpolation.py | 0 .../numerical_analysis}/newton_method.py | 0 .../numerical_analysis}/newton_raphson.py | 35 +- .../newton_raphson_2.py} | 0 .../numerical_analysis}/newton_raphson_new.py | 0 .../numerical_integration.py | 0 maths/{ => numerical_analysis}/runge_kutta.py | 0 .../runge_kutta_fehlberg_45.py | 0 .../numerical_analysis}/secant_method.py | 0 .../{ => numerical_analysis}/simpson_rule.py | 0 maths/{ => numerical_analysis}/square_root.py | 0 .../image_data/2D_problems.jpg | Bin .../image_data/2D_problems_1.jpg | Bin .../image_data}/__init__.py | 0 .../in_static_equilibrium.py | 188 ++++---- 26 files changed, 335 insertions(+), 344 deletions(-) delete mode 100644 arithmetic_analysis/README.md delete mode 100644 arithmetic_analysis/image_data/__init__.py rename {arithmetic_analysis => linear_algebra}/gaussian_elimination.py (100%) rename {arithmetic_analysis => linear_algebra}/jacobi_iteration_method.py (96%) rename {arithmetic_analysis => linear_algebra}/lu_decomposition.py (100%) rename {arithmetic_analysis => maths/numerical_analysis}/bisection.py (100%) rename maths/{bisection.py => numerical_analysis/bisection_2.py} (100%) rename maths/{ => numerical_analysis}/integration_by_simpson_approx.py (100%) rename {arithmetic_analysis => maths/numerical_analysis}/intersection.py (100%) rename maths/{ => numerical_analysis}/nevilles_method.py (100%) rename {arithmetic_analysis => maths/numerical_analysis}/newton_forward_interpolation.py 
(100%) rename {arithmetic_analysis => maths/numerical_analysis}/newton_method.py (100%) rename {arithmetic_analysis => maths/numerical_analysis}/newton_raphson.py (61%) rename maths/{newton_raphson.py => numerical_analysis/newton_raphson_2.py} (100%) rename {arithmetic_analysis => maths/numerical_analysis}/newton_raphson_new.py (100%) rename maths/{ => numerical_analysis}/numerical_integration.py (100%) rename maths/{ => numerical_analysis}/runge_kutta.py (100%) rename maths/{ => numerical_analysis}/runge_kutta_fehlberg_45.py (100%) rename {arithmetic_analysis => maths/numerical_analysis}/secant_method.py (100%) rename maths/{ => numerical_analysis}/simpson_rule.py (100%) rename maths/{ => numerical_analysis}/square_root.py (100%) rename {arithmetic_analysis => physics}/image_data/2D_problems.jpg (100%) rename {arithmetic_analysis => physics}/image_data/2D_problems_1.jpg (100%) rename {arithmetic_analysis => physics/image_data}/__init__.py (100%) rename {arithmetic_analysis => physics}/in_static_equilibrium.py (96%) diff --git a/DIRECTORY.md b/DIRECTORY.md index c07e1550d..1e3711fe8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1,17 +1,4 @@ -## Arithmetic Analysis - * [Bisection](arithmetic_analysis/bisection.py) - * [Gaussian Elimination](arithmetic_analysis/gaussian_elimination.py) - * [In Static Equilibrium](arithmetic_analysis/in_static_equilibrium.py) - * [Intersection](arithmetic_analysis/intersection.py) - * [Jacobi Iteration Method](arithmetic_analysis/jacobi_iteration_method.py) - * [Lu Decomposition](arithmetic_analysis/lu_decomposition.py) - * [Newton Forward Interpolation](arithmetic_analysis/newton_forward_interpolation.py) - * [Newton Method](arithmetic_analysis/newton_method.py) - * [Newton Raphson](arithmetic_analysis/newton_raphson.py) - * [Newton Raphson New](arithmetic_analysis/newton_raphson_new.py) - * [Secant Method](arithmetic_analysis/secant_method.py) - ## Audio Filters * [Butterworth Filter](audio_filters/butterworth_filter.py) * [Iir 
Filter](audio_filters/iir_filter.py) @@ -520,6 +507,9 @@ * [Test Knapsack](knapsack/tests/test_knapsack.py) ## Linear Algebra + * [Gaussian Elimination](linear_algebra/gaussian_elimination.py) + * [Jacobi Iteration Method](linear_algebra/jacobi_iteration_method.py) + * [Lu Decomposition](linear_algebra/lu_decomposition.py) * Src * [Conjugate Gradient](linear_algebra/src/conjugate_gradient.py) * [Lib](linear_algebra/src/lib.py) @@ -583,7 +573,6 @@ * [Binary Multiplication](maths/binary_multiplication.py) * [Binomial Coefficient](maths/binomial_coefficient.py) * [Binomial Distribution](maths/binomial_distribution.py) - * [Bisection](maths/bisection.py) * [Ceil](maths/ceil.py) * [Chebyshev Distance](maths/chebyshev_distance.py) * [Check Polygon](maths/check_polygon.py) @@ -617,7 +606,6 @@ * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) - * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) * [Interquartile Range](maths/interquartile_range.py) * [Is Int Palindrome](maths/is_int_palindrome.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) @@ -644,10 +632,24 @@ * [Modular Exponential](maths/modular_exponential.py) * [Monte Carlo](maths/monte_carlo.py) * [Monte Carlo Dice](maths/monte_carlo_dice.py) - * [Nevilles Method](maths/nevilles_method.py) - * [Newton Raphson](maths/newton_raphson.py) * [Number Of Digits](maths/number_of_digits.py) - * [Numerical Integration](maths/numerical_integration.py) + * Numerical Analysis + * [Bisection](maths/numerical_analysis/bisection.py) + * [Bisection 2](maths/numerical_analysis/bisection_2.py) + * [Integration By Simpson Approx](maths/numerical_analysis/integration_by_simpson_approx.py) + * [Intersection](maths/numerical_analysis/intersection.py) + * [Nevilles Method](maths/numerical_analysis/nevilles_method.py) + * [Newton Forward 
Interpolation](maths/numerical_analysis/newton_forward_interpolation.py) + * [Newton Method](maths/numerical_analysis/newton_method.py) + * [Newton Raphson](maths/numerical_analysis/newton_raphson.py) + * [Newton Raphson 2](maths/numerical_analysis/newton_raphson_2.py) + * [Newton Raphson New](maths/numerical_analysis/newton_raphson_new.py) + * [Numerical Integration](maths/numerical_analysis/numerical_integration.py) + * [Runge Kutta](maths/numerical_analysis/runge_kutta.py) + * [Runge Kutta Fehlberg 45](maths/numerical_analysis/runge_kutta_fehlberg_45.py) + * [Secant Method](maths/numerical_analysis/secant_method.py) + * [Simpson Rule](maths/numerical_analysis/simpson_rule.py) + * [Square Root](maths/numerical_analysis/square_root.py) * [Odd Sieve](maths/odd_sieve.py) * [Perfect Cube](maths/perfect_cube.py) * [Perfect Number](maths/perfect_number.py) @@ -673,8 +675,6 @@ * [Radians](maths/radians.py) * [Radix2 Fft](maths/radix2_fft.py) * [Remove Digit](maths/remove_digit.py) - * [Runge Kutta](maths/runge_kutta.py) - * [Runge Kutta Fehlberg 45](maths/runge_kutta_fehlberg_45.py) * [Segmented Sieve](maths/segmented_sieve.py) * Series * [Arithmetic](maths/series/arithmetic.py) @@ -687,7 +687,6 @@ * [Sieve Of Eratosthenes](maths/sieve_of_eratosthenes.py) * [Sigmoid](maths/sigmoid.py) * [Signum](maths/signum.py) - * [Simpson Rule](maths/simpson_rule.py) * [Simultaneous Linear Equation Solver](maths/simultaneous_linear_equation_solver.py) * [Sin](maths/sin.py) * [Sock Merchant](maths/sock_merchant.py) @@ -709,7 +708,6 @@ * [Proth Number](maths/special_numbers/proth_number.py) * [Ugly Numbers](maths/special_numbers/ugly_numbers.py) * [Weird Number](maths/special_numbers/weird_number.py) - * [Square Root](maths/square_root.py) * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) * [Sum Of Digits](maths/sum_of_digits.py) * [Sum Of Geometric Progression](maths/sum_of_geometric_progression.py) @@ -812,6 +810,7 @@ * [Horizontal Projectile 
Motion](physics/horizontal_projectile_motion.py) * [Hubble Parameter](physics/hubble_parameter.py) * [Ideal Gas Law](physics/ideal_gas_law.py) + * [In Static Equilibrium](physics/in_static_equilibrium.py) * [Kinetic Energy](physics/kinetic_energy.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [Malus Law](physics/malus_law.py) diff --git a/arithmetic_analysis/README.md b/arithmetic_analysis/README.md deleted file mode 100644 index 45cf321eb..000000000 --- a/arithmetic_analysis/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Arithmetic analysis - -Arithmetic analysis is a branch of mathematics that deals with solving linear equations. - -* -* -* diff --git a/arithmetic_analysis/image_data/__init__.py b/arithmetic_analysis/image_data/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/arithmetic_analysis/gaussian_elimination.py b/linear_algebra/gaussian_elimination.py similarity index 100% rename from arithmetic_analysis/gaussian_elimination.py rename to linear_algebra/gaussian_elimination.py diff --git a/arithmetic_analysis/jacobi_iteration_method.py b/linear_algebra/jacobi_iteration_method.py similarity index 96% rename from arithmetic_analysis/jacobi_iteration_method.py rename to linear_algebra/jacobi_iteration_method.py index 44c52dd44..8c91a19ef 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/linear_algebra/jacobi_iteration_method.py @@ -1,203 +1,203 @@ -""" -Jacobi Iteration Method - https://en.wikipedia.org/wiki/Jacobi_method -""" -from __future__ import annotations - -import numpy as np -from numpy import float64 -from numpy.typing import NDArray - - -# Method to find solution of system of linear equations -def jacobi_iteration_method( - coefficient_matrix: NDArray[float64], - constant_matrix: NDArray[float64], - init_val: list[float], - iterations: int, -) -> list[float]: - """ - Jacobi Iteration Method: - An iterative algorithm to determine the solutions of strictly diagonally 
dominant - system of linear equations - - 4x1 + x2 + x3 = 2 - x1 + 5x2 + 2x3 = -6 - x1 + 2x2 + 4x3 = -4 - - x_init = [0.5, -0.5 , -0.5] - - Examples: - - >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) - >>> constant = np.array([[2], [-6], [-4]]) - >>> init_val = [0.5, -0.5, -0.5] - >>> iterations = 3 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) - [0.909375, -1.14375, -0.7484375] - - - >>> coefficient = np.array([[4, 1, 1], [1, 5, 2]]) - >>> constant = np.array([[2], [-6], [-4]]) - >>> init_val = [0.5, -0.5, -0.5] - >>> iterations = 3 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) - Traceback (most recent call last): - ... - ValueError: Coefficient matrix dimensions must be nxn but received 2x3 - - >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) - >>> constant = np.array([[2], [-6]]) - >>> init_val = [0.5, -0.5, -0.5] - >>> iterations = 3 - >>> jacobi_iteration_method( - ... coefficient, constant, init_val, iterations - ... ) # doctest: +NORMALIZE_WHITESPACE - Traceback (most recent call last): - ... - ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but - received 3x3 and 2x1 - - >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) - >>> constant = np.array([[2], [-6], [-4]]) - >>> init_val = [0.5, -0.5] - >>> iterations = 3 - >>> jacobi_iteration_method( - ... coefficient, constant, init_val, iterations - ... ) # doctest: +NORMALIZE_WHITESPACE - Traceback (most recent call last): - ... - ValueError: Number of initial values must be equal to number of rows in coefficient - matrix but received 2 and 3 - - >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) - >>> constant = np.array([[2], [-6], [-4]]) - >>> init_val = [0.5, -0.5, -0.5] - >>> iterations = 0 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) - Traceback (most recent call last): - ... 
- ValueError: Iterations must be at least 1 - """ - - rows1, cols1 = coefficient_matrix.shape - rows2, cols2 = constant_matrix.shape - - if rows1 != cols1: - msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" - raise ValueError(msg) - - if cols2 != 1: - msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}" - raise ValueError(msg) - - if rows1 != rows2: - msg = ( - "Coefficient and constant matrices dimensions must be nxn and nx1 but " - f"received {rows1}x{cols1} and {rows2}x{cols2}" - ) - raise ValueError(msg) - - if len(init_val) != rows1: - msg = ( - "Number of initial values must be equal to number of rows in coefficient " - f"matrix but received {len(init_val)} and {rows1}" - ) - raise ValueError(msg) - - if iterations <= 0: - raise ValueError("Iterations must be at least 1") - - table: NDArray[float64] = np.concatenate( - (coefficient_matrix, constant_matrix), axis=1 - ) - - rows, cols = table.shape - - strictly_diagonally_dominant(table) - - """ - # Iterates the whole matrix for given number of times - for _ in range(iterations): - new_val = [] - for row in range(rows): - temp = 0 - for col in range(cols): - if col == row: - denom = table[row][col] - elif col == cols - 1: - val = table[row][col] - else: - temp += (-1) * table[row][col] * init_val[col] - temp = (temp + val) / denom - new_val.append(temp) - init_val = new_val - """ - - # denominator - a list of values along the diagonal - denominator = np.diag(coefficient_matrix) - - # val_last - values of the last column of the table array - val_last = table[:, -1] - - # masks - boolean mask of all strings without diagonal - # elements array coefficient_matrix - masks = ~np.eye(coefficient_matrix.shape[0], dtype=bool) - - # no_diagonals - coefficient_matrix array values without diagonal elements - no_diagonals = coefficient_matrix[masks].reshape(-1, rows - 1) - - # Here we get 'i_col' - these are the column numbers, for each row - # without diagonal elements, except 
for the last column. - i_row, i_col = np.where(masks) - ind = i_col.reshape(-1, rows - 1) - - #'i_col' is converted to a two-dimensional list 'ind', which will be - # used to make selections from 'init_val' ('arr' array see below). - - # Iterates the whole matrix for given number of times - for _ in range(iterations): - arr = np.take(init_val, ind) - sum_product_rows = np.sum((-1) * no_diagonals * arr, axis=1) - new_val = (sum_product_rows + val_last) / denominator - init_val = new_val - - return new_val.tolist() - - -# Checks if the given matrix is strictly diagonally dominant -def strictly_diagonally_dominant(table: NDArray[float64]) -> bool: - """ - >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 4, -4]]) - >>> strictly_diagonally_dominant(table) - True - - >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 3, -4]]) - >>> strictly_diagonally_dominant(table) - Traceback (most recent call last): - ... - ValueError: Coefficient matrix is not strictly diagonally dominant - """ - - rows, cols = table.shape - - is_diagonally_dominant = True - - for i in range(rows): - total = 0 - for j in range(cols - 1): - if i == j: - continue - else: - total += table[i][j] - - if table[i][i] <= total: - raise ValueError("Coefficient matrix is not strictly diagonally dominant") - - return is_diagonally_dominant - - -# Test Cases -if __name__ == "__main__": - import doctest - - doctest.testmod() +""" +Jacobi Iteration Method - https://en.wikipedia.org/wiki/Jacobi_method +""" +from __future__ import annotations + +import numpy as np +from numpy import float64 +from numpy.typing import NDArray + + +# Method to find solution of system of linear equations +def jacobi_iteration_method( + coefficient_matrix: NDArray[float64], + constant_matrix: NDArray[float64], + init_val: list[float], + iterations: int, +) -> list[float]: + """ + Jacobi Iteration Method: + An iterative algorithm to determine the solutions of strictly diagonally dominant + system of linear equations + + 
4x1 + x2 + x3 = 2 + x1 + 5x2 + 2x3 = -6 + x1 + 2x2 + 4x3 = -4 + + x_init = [0.5, -0.5 , -0.5] + + Examples: + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 3 + >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + [0.909375, -1.14375, -0.7484375] + + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 3 + >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + Traceback (most recent call last): + ... + ValueError: Coefficient matrix dimensions must be nxn but received 2x3 + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 3 + >>> jacobi_iteration_method( + ... coefficient, constant, init_val, iterations + ... ) # doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... + ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but + received 3x3 and 2x1 + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5] + >>> iterations = 3 + >>> jacobi_iteration_method( + ... coefficient, constant, init_val, iterations + ... ) # doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... + ValueError: Number of initial values must be equal to number of rows in coefficient + matrix but received 2 and 3 + + >>> coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]]) + >>> constant = np.array([[2], [-6], [-4]]) + >>> init_val = [0.5, -0.5, -0.5] + >>> iterations = 0 + >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + Traceback (most recent call last): + ... 
+ ValueError: Iterations must be at least 1 + """ + + rows1, cols1 = coefficient_matrix.shape + rows2, cols2 = constant_matrix.shape + + if rows1 != cols1: + msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" + raise ValueError(msg) + + if cols2 != 1: + msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}" + raise ValueError(msg) + + if rows1 != rows2: + msg = ( + "Coefficient and constant matrices dimensions must be nxn and nx1 but " + f"received {rows1}x{cols1} and {rows2}x{cols2}" + ) + raise ValueError(msg) + + if len(init_val) != rows1: + msg = ( + "Number of initial values must be equal to number of rows in coefficient " + f"matrix but received {len(init_val)} and {rows1}" + ) + raise ValueError(msg) + + if iterations <= 0: + raise ValueError("Iterations must be at least 1") + + table: NDArray[float64] = np.concatenate( + (coefficient_matrix, constant_matrix), axis=1 + ) + + rows, cols = table.shape + + strictly_diagonally_dominant(table) + + """ + # Iterates the whole matrix for given number of times + for _ in range(iterations): + new_val = [] + for row in range(rows): + temp = 0 + for col in range(cols): + if col == row: + denom = table[row][col] + elif col == cols - 1: + val = table[row][col] + else: + temp += (-1) * table[row][col] * init_val[col] + temp = (temp + val) / denom + new_val.append(temp) + init_val = new_val + """ + + # denominator - a list of values along the diagonal + denominator = np.diag(coefficient_matrix) + + # val_last - values of the last column of the table array + val_last = table[:, -1] + + # masks - boolean mask of all strings without diagonal + # elements array coefficient_matrix + masks = ~np.eye(coefficient_matrix.shape[0], dtype=bool) + + # no_diagonals - coefficient_matrix array values without diagonal elements + no_diagonals = coefficient_matrix[masks].reshape(-1, rows - 1) + + # Here we get 'i_col' - these are the column numbers, for each row + # without diagonal elements, except 
for the last column. + i_row, i_col = np.where(masks) + ind = i_col.reshape(-1, rows - 1) + + #'i_col' is converted to a two-dimensional list 'ind', which will be + # used to make selections from 'init_val' ('arr' array see below). + + # Iterates the whole matrix for given number of times + for _ in range(iterations): + arr = np.take(init_val, ind) + sum_product_rows = np.sum((-1) * no_diagonals * arr, axis=1) + new_val = (sum_product_rows + val_last) / denominator + init_val = new_val + + return new_val.tolist() + + +# Checks if the given matrix is strictly diagonally dominant +def strictly_diagonally_dominant(table: NDArray[float64]) -> bool: + """ + >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 4, -4]]) + >>> strictly_diagonally_dominant(table) + True + + >>> table = np.array([[4, 1, 1, 2], [1, 5, 2, -6], [1, 2, 3, -4]]) + >>> strictly_diagonally_dominant(table) + Traceback (most recent call last): + ... + ValueError: Coefficient matrix is not strictly diagonally dominant + """ + + rows, cols = table.shape + + is_diagonally_dominant = True + + for i in range(rows): + total = 0 + for j in range(cols - 1): + if i == j: + continue + else: + total += table[i][j] + + if table[i][i] <= total: + raise ValueError("Coefficient matrix is not strictly diagonally dominant") + + return is_diagonally_dominant + + +# Test Cases +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/arithmetic_analysis/lu_decomposition.py b/linear_algebra/lu_decomposition.py similarity index 100% rename from arithmetic_analysis/lu_decomposition.py rename to linear_algebra/lu_decomposition.py diff --git a/arithmetic_analysis/bisection.py b/maths/numerical_analysis/bisection.py similarity index 100% rename from arithmetic_analysis/bisection.py rename to maths/numerical_analysis/bisection.py diff --git a/maths/bisection.py b/maths/numerical_analysis/bisection_2.py similarity index 100% rename from maths/bisection.py rename to 
maths/numerical_analysis/bisection_2.py diff --git a/maths/integration_by_simpson_approx.py b/maths/numerical_analysis/integration_by_simpson_approx.py similarity index 100% rename from maths/integration_by_simpson_approx.py rename to maths/numerical_analysis/integration_by_simpson_approx.py diff --git a/arithmetic_analysis/intersection.py b/maths/numerical_analysis/intersection.py similarity index 100% rename from arithmetic_analysis/intersection.py rename to maths/numerical_analysis/intersection.py diff --git a/maths/nevilles_method.py b/maths/numerical_analysis/nevilles_method.py similarity index 100% rename from maths/nevilles_method.py rename to maths/numerical_analysis/nevilles_method.py diff --git a/arithmetic_analysis/newton_forward_interpolation.py b/maths/numerical_analysis/newton_forward_interpolation.py similarity index 100% rename from arithmetic_analysis/newton_forward_interpolation.py rename to maths/numerical_analysis/newton_forward_interpolation.py diff --git a/arithmetic_analysis/newton_method.py b/maths/numerical_analysis/newton_method.py similarity index 100% rename from arithmetic_analysis/newton_method.py rename to maths/numerical_analysis/newton_method.py diff --git a/arithmetic_analysis/newton_raphson.py b/maths/numerical_analysis/newton_raphson.py similarity index 61% rename from arithmetic_analysis/newton_raphson.py rename to maths/numerical_analysis/newton_raphson.py index 1b90ad417..8491ca800 100644 --- a/arithmetic_analysis/newton_raphson.py +++ b/maths/numerical_analysis/newton_raphson.py @@ -5,42 +5,41 @@ from __future__ import annotations from decimal import Decimal -from math import * # noqa: F403 -from sympy import diff +from sympy import diff, lambdify, symbols -def newton_raphson( - func: str, a: float | Decimal, precision: float = 10**-10 -) -> float: +def newton_raphson(func: str, a: float | Decimal, precision: float = 1e-10) -> float: """Finds root from the point 'a' onwards by Newton-Raphson method >>> 
newton_raphson("sin(x)", 2) 3.1415926536808043 - >>> newton_raphson("x**2 - 5*x +2", 0.4) + >>> newton_raphson("x**2 - 5*x + 2", 0.4) 0.4384471871911695 >>> newton_raphson("x**2 - 5", 0.1) 2.23606797749979 - >>> newton_raphson("log(x)- 1", 2) + >>> newton_raphson("log(x) - 1", 2) 2.718281828458938 """ - x = a + x = symbols("x") + f = lambdify(x, func, "math") + f_derivative = lambdify(x, diff(func), "math") + x_curr = a while True: - x = Decimal(x) - ( - Decimal(eval(func)) / Decimal(eval(str(diff(func)))) # noqa: S307 - ) - # This number dictates the accuracy of the answer - if abs(eval(func)) < precision: # noqa: S307 - return float(x) + x_curr = Decimal(x_curr) - Decimal(f(x_curr)) / Decimal(f_derivative(x_curr)) + if abs(f(x_curr)) < precision: + return float(x_curr) -# Let's Execute if __name__ == "__main__": - # Find root of trigonometric function + import doctest + + doctest.testmod() + # Find value of pi print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") # Find root of polynomial print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}") - # Find Square Root of 5 + # Find value of e print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}") - # Exponential Roots + # Find root of exponential function print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}") diff --git a/maths/newton_raphson.py b/maths/numerical_analysis/newton_raphson_2.py similarity index 100% rename from maths/newton_raphson.py rename to maths/numerical_analysis/newton_raphson_2.py diff --git a/arithmetic_analysis/newton_raphson_new.py b/maths/numerical_analysis/newton_raphson_new.py similarity index 100% rename from arithmetic_analysis/newton_raphson_new.py rename to maths/numerical_analysis/newton_raphson_new.py diff --git a/maths/numerical_integration.py b/maths/numerical_analysis/numerical_integration.py similarity index 100% rename from maths/numerical_integration.py rename to 
maths/numerical_analysis/numerical_integration.py diff --git a/maths/runge_kutta.py b/maths/numerical_analysis/runge_kutta.py similarity index 100% rename from maths/runge_kutta.py rename to maths/numerical_analysis/runge_kutta.py diff --git a/maths/runge_kutta_fehlberg_45.py b/maths/numerical_analysis/runge_kutta_fehlberg_45.py similarity index 100% rename from maths/runge_kutta_fehlberg_45.py rename to maths/numerical_analysis/runge_kutta_fehlberg_45.py diff --git a/arithmetic_analysis/secant_method.py b/maths/numerical_analysis/secant_method.py similarity index 100% rename from arithmetic_analysis/secant_method.py rename to maths/numerical_analysis/secant_method.py diff --git a/maths/simpson_rule.py b/maths/numerical_analysis/simpson_rule.py similarity index 100% rename from maths/simpson_rule.py rename to maths/numerical_analysis/simpson_rule.py diff --git a/maths/square_root.py b/maths/numerical_analysis/square_root.py similarity index 100% rename from maths/square_root.py rename to maths/numerical_analysis/square_root.py diff --git a/arithmetic_analysis/image_data/2D_problems.jpg b/physics/image_data/2D_problems.jpg similarity index 100% rename from arithmetic_analysis/image_data/2D_problems.jpg rename to physics/image_data/2D_problems.jpg diff --git a/arithmetic_analysis/image_data/2D_problems_1.jpg b/physics/image_data/2D_problems_1.jpg similarity index 100% rename from arithmetic_analysis/image_data/2D_problems_1.jpg rename to physics/image_data/2D_problems_1.jpg diff --git a/arithmetic_analysis/__init__.py b/physics/image_data/__init__.py similarity index 100% rename from arithmetic_analysis/__init__.py rename to physics/image_data/__init__.py diff --git a/arithmetic_analysis/in_static_equilibrium.py b/physics/in_static_equilibrium.py similarity index 96% rename from arithmetic_analysis/in_static_equilibrium.py rename to physics/in_static_equilibrium.py index 7aaecf174..d56299f60 100644 --- a/arithmetic_analysis/in_static_equilibrium.py +++ 
b/physics/in_static_equilibrium.py @@ -1,94 +1,94 @@ -""" -Checks if a system of forces is in static equilibrium. -""" -from __future__ import annotations - -from numpy import array, cos, cross, float64, radians, sin -from numpy.typing import NDArray - - -def polar_force( - magnitude: float, angle: float, radian_mode: bool = False -) -> list[float]: - """ - Resolves force along rectangular components. - (force, angle) => (force_x, force_y) - >>> import math - >>> force = polar_force(10, 45) - >>> math.isclose(force[0], 7.071067811865477) - True - >>> math.isclose(force[1], 7.0710678118654755) - True - >>> force = polar_force(10, 3.14, radian_mode=True) - >>> math.isclose(force[0], -9.999987317275396) - True - >>> math.isclose(force[1], 0.01592652916486828) - True - """ - if radian_mode: - return [magnitude * cos(angle), magnitude * sin(angle)] - return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))] - - -def in_static_equilibrium( - forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1 -) -> bool: - """ - Check if a system is in equilibrium. - It takes two numpy.array objects. - forces ==> [ - [force1_x, force1_y], - [force2_x, force2_y], - ....] - location ==> [ - [x1, y1], - [x2, y2], - ....] 
- >>> force = array([[1, 1], [-1, 2]]) - >>> location = array([[1, 0], [10, 0]]) - >>> in_static_equilibrium(force, location) - False - """ - # summation of moments is zero - moments: NDArray[float64] = cross(location, forces) - sum_moments: float = sum(moments) - return abs(sum_moments) < eps - - -if __name__ == "__main__": - # Test to check if it works - forces = array( - [ - polar_force(718.4, 180 - 30), - polar_force(879.54, 45), - polar_force(100, -90), - ] - ) - - location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]]) - - assert in_static_equilibrium(forces, location) - - # Problem 1 in image_data/2D_problems.jpg - forces = array( - [ - polar_force(30 * 9.81, 15), - polar_force(215, 180 - 45), - polar_force(264, 90 - 30), - ] - ) - - location = array([[0, 0], [0, 0], [0, 0]]) - - assert in_static_equilibrium(forces, location) - - # Problem in image_data/2D_problems_1.jpg - forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) - - location = array([[0, 0], [6, 0], [10, 0], [12, 0]]) - - assert in_static_equilibrium(forces, location) - - import doctest - - doctest.testmod() +""" +Checks if a system of forces is in static equilibrium. +""" +from __future__ import annotations + +from numpy import array, cos, cross, float64, radians, sin +from numpy.typing import NDArray + + +def polar_force( + magnitude: float, angle: float, radian_mode: bool = False +) -> list[float]: + """ + Resolves force along rectangular components. 
+ (force, angle) => (force_x, force_y) + >>> import math + >>> force = polar_force(10, 45) + >>> math.isclose(force[0], 7.071067811865477) + True + >>> math.isclose(force[1], 7.0710678118654755) + True + >>> force = polar_force(10, 3.14, radian_mode=True) + >>> math.isclose(force[0], -9.999987317275396) + True + >>> math.isclose(force[1], 0.01592652916486828) + True + """ + if radian_mode: + return [magnitude * cos(angle), magnitude * sin(angle)] + return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))] + + +def in_static_equilibrium( + forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1 +) -> bool: + """ + Check if a system is in equilibrium. + It takes two numpy.array objects. + forces ==> [ + [force1_x, force1_y], + [force2_x, force2_y], + ....] + location ==> [ + [x1, y1], + [x2, y2], + ....] + >>> force = array([[1, 1], [-1, 2]]) + >>> location = array([[1, 0], [10, 0]]) + >>> in_static_equilibrium(force, location) + False + """ + # summation of moments is zero + moments: NDArray[float64] = cross(location, forces) + sum_moments: float = sum(moments) + return abs(sum_moments) < eps + + +if __name__ == "__main__": + # Test to check if it works + forces = array( + [ + polar_force(718.4, 180 - 30), + polar_force(879.54, 45), + polar_force(100, -90), + ] + ) + + location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]]) + + assert in_static_equilibrium(forces, location) + + # Problem 1 in image_data/2D_problems.jpg + forces = array( + [ + polar_force(30 * 9.81, 15), + polar_force(215, 180 - 45), + polar_force(264, 90 - 30), + ] + ) + + location = array([[0, 0], [0, 0], [0, 0]]) + + assert in_static_equilibrium(forces, location) + + # Problem in image_data/2D_problems_1.jpg + forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]]) + + location = array([[0, 0], [6, 0], [10, 0], [12, 0]]) + + assert in_static_equilibrium(forces, location) + + import doctest + + doctest.testmod() From 
417b7edfc3fdfe9534a56e3e7d0a368f76b3edb4 Mon Sep 17 00:00:00 2001 From: Krishna-singhal <65902764+Krishna-Singhal@users.noreply.github.com> Date: Mon, 23 Oct 2023 13:21:27 +0530 Subject: [PATCH 205/306] code enhancement in `sort.double_sort` (#10798) * don't need to return list because list is mutable * Don't need to return list as list is mutable * use advantage of python in swapping * filter blank inputs from input list * minor changes * minor mistake * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * more readable * Update double_sort.py * last fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/double_sort.py | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/sorts/double_sort.py b/sorts/double_sort.py index a19641d94..bd5fdca1e 100644 --- a/sorts/double_sort.py +++ b/sorts/double_sort.py @@ -1,4 +1,7 @@ -def double_sort(lst): +from typing import Any + + +def double_sort(collection: list[Any]) -> list[Any]: """This sorting algorithm sorts an array using the principle of bubble sort, but does it both from left to right and right to left. 
Hence, it's called "Double sort" @@ -14,29 +17,28 @@ def double_sort(lst): >>> double_sort([-3, 10, 16, -42, 29]) == sorted([-3, 10, 16, -42, 29]) True """ - no_of_elements = len(lst) + no_of_elements = len(collection) for _ in range( int(((no_of_elements - 1) / 2) + 1) ): # we don't need to traverse to end of list as for j in range(no_of_elements - 1): - if ( - lst[j + 1] < lst[j] - ): # applying bubble sort algorithm from left to right (or forwards) - temp = lst[j + 1] - lst[j + 1] = lst[j] - lst[j] = temp - if ( - lst[no_of_elements - 1 - j] < lst[no_of_elements - 2 - j] - ): # applying bubble sort algorithm from right to left (or backwards) - temp = lst[no_of_elements - 1 - j] - lst[no_of_elements - 1 - j] = lst[no_of_elements - 2 - j] - lst[no_of_elements - 2 - j] = temp - return lst + # apply the bubble sort algorithm from left to right (or forwards) + if collection[j + 1] < collection[j]: + collection[j], collection[j + 1] = collection[j + 1], collection[j] + # apply the bubble sort algorithm from right to left (or backwards) + if collection[no_of_elements - 1 - j] < collection[no_of_elements - 2 - j]: + ( + collection[no_of_elements - 1 - j], + collection[no_of_elements - 2 - j], + ) = ( + collection[no_of_elements - 2 - j], + collection[no_of_elements - 1 - j], + ) + return collection if __name__ == "__main__": - print("enter the list to be sorted") - lst = [int(x) for x in input().split()] # inputing elements of the list in one line - sorted_lst = double_sort(lst) + # allow the user to input the elements of the list on one line + unsorted = [int(x) for x in input("Enter the list to be sorted: ").split() if x] print("the sorted list is") - print(sorted_lst) + print(f"{double_sort(unsorted) = }") From d051db1f14cbb0edd2b0db1e4edef76cce6c7823 Mon Sep 17 00:00:00 2001 From: Berat Osman Demiralay Date: Mon, 23 Oct 2023 16:25:07 +0300 Subject: [PATCH 206/306] Add Simple Moving Average (SMA) Calculation (#9300) * Add Simple Moving Average (SMA) Calculation This 
commit adds a Python script for calculating the Simple Moving Average (SMA) of a time series data. The script also includes a doctest that verifies the correctness of the SMA calculations for a sample dataset. Usage: - Run the script with your own time series data and specify the window size for SMA calculations. * Update financial/simple_moving_average.py Co-authored-by: Tianyi Zheng * Update financial/simple_moving_average.py Co-authored-by: Tianyi Zheng * Update financial/simple_moving_average.py Co-authored-by: Tianyi Zheng * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update simple_moving_average.py * Update financial/simple_moving_average.py * Update simple_moving_average.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Tianyi Zheng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- financial/simple_moving_average.py | 68 ++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 financial/simple_moving_average.py diff --git a/financial/simple_moving_average.py b/financial/simple_moving_average.py new file mode 100644 index 000000000..d5d68ffd3 --- /dev/null +++ b/financial/simple_moving_average.py @@ -0,0 +1,68 @@ +""" +The Simple Moving Average (SMA) is a statistical calculation used to analyze data points +by creating a constantly updated average price over a specific time period. +In finance, SMA is often used in time series analysis to smooth out price data +and identify trends. + +Reference: https://en.wikipedia.org/wiki/Moving_average +""" +from collections.abc import Sequence + + +def simple_moving_average( + data: Sequence[float], window_size: int +) -> list[float | None]: + """ + Calculate the simple moving average (SMA) for some given time series data. + + :param data: A list of numerical data points. 
+ :param window_size: An integer representing the size of the SMA window. + :return: A list of SMA values with the same length as the input data. + + Examples: + >>> sma = simple_moving_average([10, 12, 15, 13, 14, 16, 18, 17, 19, 21], 3) + >>> [round(value, 2) if value is not None else None for value in sma] + [None, None, 12.33, 13.33, 14.0, 14.33, 16.0, 17.0, 18.0, 19.0] + >>> simple_moving_average([10, 12, 15], 5) + [None, None, None] + >>> simple_moving_average([10, 12, 15, 13, 14, 16, 18, 17, 19, 21], 0) + Traceback (most recent call last): + ... + ValueError: Window size must be a positive integer + """ + if window_size < 1: + raise ValueError("Window size must be a positive integer") + + sma: list[float | None] = [] + + for i in range(len(data)): + if i < window_size - 1: + sma.append(None) # SMA not available for early data points + else: + window = data[i - window_size + 1 : i + 1] + sma_value = sum(window) / window_size + sma.append(sma_value) + return sma + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Example data (replace with your own time series data) + data = [10, 12, 15, 13, 14, 16, 18, 17, 19, 21] + + # Specify the window size for the SMA + window_size = 3 + + # Calculate the Simple Moving Average + sma_values = simple_moving_average(data, window_size) + + # Print the SMA values + print("Simple Moving Average (SMA) Values:") + for i, value in enumerate(sma_values): + if value is not None: + print(f"Day {i + 1}: {value:.2f}") + else: + print(f"Day {i + 1}: Not enough data for SMA") From 4cbefadbd7adee486e33a6b66014a2474e81f82e Mon Sep 17 00:00:00 2001 From: Tauseef Hilal Tantary Date: Mon, 23 Oct 2023 19:21:09 +0530 Subject: [PATCH 207/306] [New Algorithm] - Triangular Numbers (#10663) * Add New Algorithm: Triangular Numbers * Calculate nth triangular number instead of generating a list * Handle 0th position and update function name and docstring --- maths/special_numbers/triangular_numbers.py | 43 
+++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 maths/special_numbers/triangular_numbers.py diff --git a/maths/special_numbers/triangular_numbers.py b/maths/special_numbers/triangular_numbers.py new file mode 100644 index 000000000..5be89e610 --- /dev/null +++ b/maths/special_numbers/triangular_numbers.py @@ -0,0 +1,43 @@ +""" +A triangular number or triangle number counts objects arranged in an +equilateral triangle. This module provides a function to generate n'th +triangular number. + +For more information about triangular numbers, refer to: +https://en.wikipedia.org/wiki/Triangular_number +""" + + +def triangular_number(position: int) -> int: + """ + Generate the triangular number at the specified position. + + Args: + position (int): The position of the triangular number to generate. + + Returns: + int: The triangular number at the specified position. + + Raises: + ValueError: If `position` is negative. + + Examples: + >>> triangular_number(1) + 1 + >>> triangular_number(3) + 6 + >>> triangular_number(-1) + Traceback (most recent call last): + ... 
+ ValueError: param `position` must be non-negative + """ + if position < 0: + raise ValueError("param `position` must be non-negative") + + return position * (position + 1) // 2 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 54e2aa67e8f74435b15e2a2864a7fb00981979af Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Mon, 23 Oct 2023 23:12:02 +0530 Subject: [PATCH 208/306] Enhance readability of Minimax (#10838) * Enhance readability of Minimax * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Reduce line overflow * Update backtracking/minimax.py Co-authored-by: Tianyi Zheng * Update backtracking/minimax.py Co-authored-by: Tianyi Zheng * Update backtracking/minimax.py Co-authored-by: Tianyi Zheng * Remove line overflow --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- backtracking/minimax.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/backtracking/minimax.py b/backtracking/minimax.py index 6e310131e..6dece2990 100644 --- a/backtracking/minimax.py +++ b/backtracking/minimax.py @@ -16,6 +16,22 @@ def minimax( depth: int, node_index: int, is_max: bool, scores: list[int], height: float ) -> int: """ + This function implements the minimax algorithm, which helps achieve the optimal + score for a player in a two-player game by checking all possible moves. + If the player is the maximizer, then the score is maximized. + If the player is the minimizer, then the score is minimized. + + Parameters: + - depth: Current depth in the game tree. + - node_index: Index of the current node in the scores list. + - is_max: A boolean indicating whether the current move + is for the maximizer (True) or minimizer (False). + - scores: A list containing the scores of the leaves of the game tree. 
+ - height: The maximum height of the game tree. + + Returns: + - An integer representing the optimal score for the current player. + >>> import math >>> scores = [90, 23, 6, 33, 21, 65, 123, 34423] >>> height = math.log(len(scores), 2) @@ -37,19 +53,24 @@ def minimax( if depth < 0: raise ValueError("Depth cannot be less than 0") - if len(scores) == 0: raise ValueError("Scores cannot be empty") + # Base case: If the current depth equals the height of the tree, + # return the score of the current node. if depth == height: return scores[node_index] + # If it's the maximizer's turn, choose the maximum score + # between the two possible moves. if is_max: return max( minimax(depth + 1, node_index * 2, False, scores, height), minimax(depth + 1, node_index * 2 + 1, False, scores, height), ) + # If it's the minimizer's turn, choose the minimum score + # between the two possible moves. return min( minimax(depth + 1, node_index * 2, True, scores, height), minimax(depth + 1, node_index * 2 + 1, True, scores, height), @@ -57,8 +78,11 @@ def minimax( def main() -> None: + # Sample scores and height calculation scores = [90, 23, 6, 33, 21, 65, 123, 34423] height = math.log(len(scores), 2) + + # Calculate and print the optimal value using the minimax algorithm print("Optimal value : ", end="") print(minimax(0, 0, True, scores, height)) From 30122062b93cdeba8bacb0a4a3c783bc8069b7a0 Mon Sep 17 00:00:00 2001 From: Aqib Javid Bhat Date: Mon, 23 Oct 2023 23:26:43 +0530 Subject: [PATCH 209/306] Add Floyd's Cycle Detection Algorithm (#10833) * Add Floyd's Cycle Detection Algorithm * Add tests for add_node function * Apply suggestions from code review * Update floyds_cycle_detection.py --------- Co-authored-by: Tianyi Zheng --- .../linked_list/floyds_cycle_detection.py | 150 ++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 data_structures/linked_list/floyds_cycle_detection.py diff --git a/data_structures/linked_list/floyds_cycle_detection.py 
b/data_structures/linked_list/floyds_cycle_detection.py new file mode 100644 index 000000000..6c3f13760 --- /dev/null +++ b/data_structures/linked_list/floyds_cycle_detection.py @@ -0,0 +1,150 @@ +""" +Floyd's cycle detection algorithm is a popular algorithm used to detect cycles +in a linked list. It uses two pointers, a slow pointer and a fast pointer, +to traverse the linked list. The slow pointer moves one node at a time while the fast +pointer moves two nodes at a time. If there is a cycle in the linked list, +the fast pointer will eventually catch up to the slow pointer and they will +meet at the same node. If there is no cycle, the fast pointer will reach the end of +the linked list and the algorithm will terminate. + +For more information: https://en.wikipedia.org/wiki/Cycle_detection#Floyd's_tortoise_and_hare +""" + +from collections.abc import Iterator +from dataclasses import dataclass +from typing import Any, Self + + +@dataclass +class Node: + """ + A class representing a node in a singly linked list. + """ + + data: Any + next_node: Self | None = None + + +@dataclass +class LinkedList: + """ + A class representing a singly linked list. + """ + + head: Node | None = None + + def __iter__(self) -> Iterator: + """ + Iterates through the linked list. + + Returns: + Iterator: An iterator over the linked list. + + Examples: + >>> linked_list = LinkedList() + >>> list(linked_list) + [] + >>> linked_list.add_node(1) + >>> tuple(linked_list) + (1,) + """ + visited = [] + node = self.head + while node: + # Avoid infinite loop in there's a cycle + if node in visited: + return + visited.append(node) + yield node.data + node = node.next_node + + def add_node(self, data: Any) -> None: + """ + Adds a new node to the end of the linked list. + + Args: + data (Any): The data to be stored in the new node. 
+ + Examples: + >>> linked_list = LinkedList() + >>> linked_list.add_node(1) + >>> linked_list.add_node(2) + >>> linked_list.add_node(3) + >>> linked_list.add_node(4) + >>> tuple(linked_list) + (1, 2, 3, 4) + """ + new_node = Node(data) + + if self.head is None: + self.head = new_node + return + + current_node = self.head + while current_node.next_node is not None: + current_node = current_node.next_node + + current_node.next_node = new_node + + def detect_cycle(self) -> bool: + """ + Detects if there is a cycle in the linked list using + Floyd's cycle detection algorithm. + + Returns: + bool: True if there is a cycle, False otherwise. + + Examples: + >>> linked_list = LinkedList() + >>> linked_list.add_node(1) + >>> linked_list.add_node(2) + >>> linked_list.add_node(3) + >>> linked_list.add_node(4) + + >>> linked_list.detect_cycle() + False + + # Create a cycle in the linked list + >>> linked_list.head.next_node.next_node.next_node = linked_list.head.next_node + + >>> linked_list.detect_cycle() + True + """ + if self.head is None: + return False + + slow_pointer: Node | None = self.head + fast_pointer: Node | None = self.head + + while fast_pointer is not None and fast_pointer.next_node is not None: + slow_pointer = slow_pointer.next_node if slow_pointer else None + fast_pointer = fast_pointer.next_node.next_node + if slow_pointer == fast_pointer: + return True + + return False + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + linked_list = LinkedList() + linked_list.add_node(1) + linked_list.add_node(2) + linked_list.add_node(3) + linked_list.add_node(4) + + # Create a cycle in the linked list + # It first checks if the head, next_node, and next_node.next_node attributes of the + # linked list are not None to avoid any potential type errors. 
+ if ( + linked_list.head + and linked_list.head.next_node + and linked_list.head.next_node.next_node + ): + linked_list.head.next_node.next_node.next_node = linked_list.head.next_node + + has_cycle = linked_list.detect_cycle() + print(has_cycle) # Output: True From ffd3a56c35f5ec274c819e8f2596ab5134cf9c36 Mon Sep 17 00:00:00 2001 From: SEIKH NABAB UDDIN <93948993+nababuddin@users.noreply.github.com> Date: Mon, 23 Oct 2023 23:42:28 +0530 Subject: [PATCH 210/306] Updated Selection Sort (#10855) * Update selection_sort.py * Update selection_sort.py --- sorts/selection_sort.py | 34 +++++++++++----------------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/sorts/selection_sort.py b/sorts/selection_sort.py index 28971a5e1..506836b53 100644 --- a/sorts/selection_sort.py +++ b/sorts/selection_sort.py @@ -1,22 +1,9 @@ -""" -This is a pure Python implementation of the selection sort algorithm - -For doctests run following command: -python -m doctest -v selection_sort.py -or -python3 -m doctest -v selection_sort.py - -For manual testing run: -python selection_sort.py -""" - - def selection_sort(collection: list[int]) -> list[int]: - """Pure implementation of the selection sort algorithm in Python - :param collection: some mutable ordered collection with heterogeneous - comparable items inside - :return: the same collection ordered by ascending + """ + Sorts a list in ascending order using the selection sort algorithm. + :param collection: A list of integers to be sorted. + :return: The sorted list. 
Examples: >>> selection_sort([0, 5, 3, 2, 2]) @@ -31,16 +18,17 @@ def selection_sort(collection: list[int]) -> list[int]: length = len(collection) for i in range(length - 1): - least = i + min_index = i for k in range(i + 1, length): - if collection[k] < collection[least]: - least = k - if least != i: - collection[least], collection[i] = (collection[i], collection[least]) + if collection[k] < collection[min_index]: + min_index = k + if min_index != i: + collection[i], collection[min_index] = collection[min_index], collection[i] return collection if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] - print(selection_sort(unsorted)) + sorted_list = selection_sort(unsorted) + print("Sorted List:", sorted_list) From e5d6969f38ecf03f3e3a1e35fcfd3ae2484b6e08 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 23 Oct 2023 22:29:16 +0200 Subject: [PATCH 211/306] [pre-commit.ci] pre-commit autoupdate (#10856) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.0.292 → v0.1.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.292...v0.1.1) - [github.com/psf/black: 23.9.1 → 23.10.0](https://github.com/psf/black/compare/23.9.1...23.10.0) - [github.com/pre-commit/mirrors-mypy: v1.6.0 → v1.6.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.6.0...v1.6.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 4 ++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b3def463d..e0b9922fa 100644 --- 
a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.292 + rev: v0.1.1 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.9.1 + rev: 23.10.0 hooks: - id: black @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.6.0 + rev: v1.6.1 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 1e3711fe8..dfd1a2c0c 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -233,6 +233,7 @@ * [Deque Doubly](data_structures/linked_list/deque_doubly.py) * [Doubly Linked List](data_structures/linked_list/doubly_linked_list.py) * [Doubly Linked List Two](data_structures/linked_list/doubly_linked_list_two.py) + * [Floyds Cycle Detection](data_structures/linked_list/floyds_cycle_detection.py) * [From Sequence](data_structures/linked_list/from_sequence.py) * [Has Loop](data_structures/linked_list/has_loop.py) * [Is Palindrome](data_structures/linked_list/is_palindrome.py) @@ -394,6 +395,7 @@ * [Interest](financial/interest.py) * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) + * [Simple Moving Average](financial/simple_moving_average.py) ## Fractals * [Julia Sets](fractals/julia_sets.py) @@ -706,6 +708,7 @@ * [Polygonal Numbers](maths/special_numbers/polygonal_numbers.py) * [Pronic Number](maths/special_numbers/pronic_number.py) * [Proth Number](maths/special_numbers/proth_number.py) + * [Triangular Numbers](maths/special_numbers/triangular_numbers.py) * [Ugly Numbers](maths/special_numbers/ugly_numbers.py) * [Weird Number](maths/special_numbers/weird_number.py) * [Sum Of Arithmetic Series](maths/sum_of_arithmetic_series.py) @@ -826,6 +829,7 @@ * [Shear Stress](physics/shear_stress.py) * [Speed Of Sound](physics/speed_of_sound.py) * [Speeds Of Gas Molecules](physics/speeds_of_gas_molecules.py) + * [Terminal Velocity](physics/terminal_velocity.py) ## 
Project Euler * Problem 001 From b98312ca9f2df491017e189b353e6b382b323eed Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Mon, 23 Oct 2023 16:37:17 -0400 Subject: [PATCH 212/306] Consolidate Newton-Raphson implementations (#10859) * updating DIRECTORY.md * updating DIRECTORY.md * Consolidate Newton-Raphson duplicates * Rename consolidated Newton-Raphson file * updating DIRECTORY.md * updating DIRECTORY.md * Fix doctest precision * Fix doctest precision again --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 3 - maths/numerical_analysis/newton_method.py | 54 ------- maths/numerical_analysis/newton_raphson.py | 134 +++++++++++++----- maths/numerical_analysis/newton_raphson_2.py | 64 --------- .../numerical_analysis/newton_raphson_new.py | 83 ----------- 5 files changed, 101 insertions(+), 237 deletions(-) delete mode 100644 maths/numerical_analysis/newton_method.py delete mode 100644 maths/numerical_analysis/newton_raphson_2.py delete mode 100644 maths/numerical_analysis/newton_raphson_new.py diff --git a/DIRECTORY.md b/DIRECTORY.md index dfd1a2c0c..f0b1f7c13 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -642,10 +642,7 @@ * [Intersection](maths/numerical_analysis/intersection.py) * [Nevilles Method](maths/numerical_analysis/nevilles_method.py) * [Newton Forward Interpolation](maths/numerical_analysis/newton_forward_interpolation.py) - * [Newton Method](maths/numerical_analysis/newton_method.py) * [Newton Raphson](maths/numerical_analysis/newton_raphson.py) - * [Newton Raphson 2](maths/numerical_analysis/newton_raphson_2.py) - * [Newton Raphson New](maths/numerical_analysis/newton_raphson_new.py) * [Numerical Integration](maths/numerical_analysis/numerical_integration.py) * [Runge Kutta](maths/numerical_analysis/runge_kutta.py) * [Runge Kutta Fehlberg 45](maths/numerical_analysis/runge_kutta_fehlberg_45.py) diff --git a/maths/numerical_analysis/newton_method.py b/maths/numerical_analysis/newton_method.py deleted 
file mode 100644 index 5127bfcaf..000000000 --- a/maths/numerical_analysis/newton_method.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Newton's Method.""" - -# Newton's Method - https://en.wikipedia.org/wiki/Newton%27s_method -from collections.abc import Callable - -RealFunc = Callable[[float], float] # type alias for a real -> real function - - -# function is the f(x) and derivative is the f'(x) -def newton( - function: RealFunc, - derivative: RealFunc, - starting_int: int, -) -> float: - """ - >>> newton(lambda x: x ** 3 - 2 * x - 5, lambda x: 3 * x ** 2 - 2, 3) - 2.0945514815423474 - >>> newton(lambda x: x ** 3 - 1, lambda x: 3 * x ** 2, -2) - 1.0 - >>> newton(lambda x: x ** 3 - 1, lambda x: 3 * x ** 2, -4) - 1.0000000000000102 - >>> import math - >>> newton(math.sin, math.cos, 1) - 0.0 - >>> newton(math.sin, math.cos, 2) - 3.141592653589793 - >>> newton(math.cos, lambda x: -math.sin(x), 2) - 1.5707963267948966 - >>> newton(math.cos, lambda x: -math.sin(x), 0) - Traceback (most recent call last): - ... 
- ZeroDivisionError: Could not find root - """ - prev_guess = float(starting_int) - while True: - try: - next_guess = prev_guess - function(prev_guess) / derivative(prev_guess) - except ZeroDivisionError: - raise ZeroDivisionError("Could not find root") from None - if abs(prev_guess - next_guess) < 10**-5: - return next_guess - prev_guess = next_guess - - -def f(x: float) -> float: - return (x**3) - (2 * x) - 5 - - -def f1(x: float) -> float: - return 3 * (x**2) - 2 - - -if __name__ == "__main__": - print(newton(f, f1, 3)) diff --git a/maths/numerical_analysis/newton_raphson.py b/maths/numerical_analysis/newton_raphson.py index 8491ca800..feee38f90 100644 --- a/maths/numerical_analysis/newton_raphson.py +++ b/maths/numerical_analysis/newton_raphson.py @@ -1,45 +1,113 @@ -# Implementing Newton Raphson method in Python -# Author: Syed Haseeb Shah (github.com/QuantumNovice) -# The Newton-Raphson method (also known as Newton's method) is a way to -# quickly find a good approximation for the root of a real-valued function -from __future__ import annotations +""" +The Newton-Raphson method (aka the Newton method) is a root-finding algorithm that +approximates a root of a given real-valued function f(x). It is an iterative method +given by the formula -from decimal import Decimal +x_{n + 1} = x_n + f(x_n) / f'(x_n) -from sympy import diff, lambdify, symbols +with the precision of the approximation increasing as the number of iterations increase. 
+ +Reference: https://en.wikipedia.org/wiki/Newton%27s_method +""" +from collections.abc import Callable + +RealFunc = Callable[[float], float] -def newton_raphson(func: str, a: float | Decimal, precision: float = 1e-10) -> float: - """Finds root from the point 'a' onwards by Newton-Raphson method - >>> newton_raphson("sin(x)", 2) - 3.1415926536808043 - >>> newton_raphson("x**2 - 5*x + 2", 0.4) - 0.4384471871911695 - >>> newton_raphson("x**2 - 5", 0.1) - 2.23606797749979 - >>> newton_raphson("log(x) - 1", 2) - 2.718281828458938 +def calc_derivative(f: RealFunc, x: float, delta_x: float = 1e-3) -> float: """ - x = symbols("x") - f = lambdify(x, func, "math") - f_derivative = lambdify(x, diff(func), "math") - x_curr = a - while True: - x_curr = Decimal(x_curr) - Decimal(f(x_curr)) / Decimal(f_derivative(x_curr)) - if abs(f(x_curr)) < precision: - return float(x_curr) + Approximate the derivative of a function f(x) at a point x using the finite + difference method + + >>> import math + >>> tolerance = 1e-5 + >>> derivative = calc_derivative(lambda x: x**2, 2) + >>> math.isclose(derivative, 4, abs_tol=tolerance) + True + >>> derivative = calc_derivative(math.sin, 0) + >>> math.isclose(derivative, 1, abs_tol=tolerance) + True + """ + return (f(x + delta_x / 2) - f(x - delta_x / 2)) / delta_x + + +def newton_raphson( + f: RealFunc, + x0: float = 0, + max_iter: int = 100, + step: float = 1e-6, + max_error: float = 1e-6, + log_steps: bool = False, +) -> tuple[float, float, list[float]]: + """ + Find a root of the given function f using the Newton-Raphson method. + + :param f: A real-valued single-variable function + :param x0: Initial guess + :param max_iter: Maximum number of iterations + :param step: Step size of x, used to approximate f'(x) + :param max_error: Maximum approximation error + :param log_steps: bool denoting whether to log intermediate steps + + :return: A tuple containing the approximation, the error, and the intermediate + steps. 
If log_steps is False, then an empty list is returned for the third + element of the tuple. + + :raises ZeroDivisionError: The derivative approaches 0. + :raises ArithmeticError: No solution exists, or the solution isn't found before the + iteration limit is reached. + + >>> import math + >>> tolerance = 1e-15 + >>> root, *_ = newton_raphson(lambda x: x**2 - 5*x + 2, 0.4, max_error=tolerance) + >>> math.isclose(root, (5 - math.sqrt(17)) / 2, abs_tol=tolerance) + True + >>> root, *_ = newton_raphson(lambda x: math.log(x) - 1, 2, max_error=tolerance) + >>> math.isclose(root, math.e, abs_tol=tolerance) + True + >>> root, *_ = newton_raphson(math.sin, 1, max_error=tolerance) + >>> math.isclose(root, 0, abs_tol=tolerance) + True + >>> newton_raphson(math.cos, 0) + Traceback (most recent call last): + ... + ZeroDivisionError: No converging solution found, zero derivative + >>> newton_raphson(lambda x: x**2 + 1, 2) + Traceback (most recent call last): + ... + ArithmeticError: No converging solution found, iteration limit reached + """ + + def f_derivative(x: float) -> float: + return calc_derivative(f, x, step) + + a = x0 # Set initial guess + steps = [] + for _ in range(max_iter): + if log_steps: # Log intermediate steps + steps.append(a) + + error = abs(f(a)) + if error < max_error: + return a, error, steps + + if f_derivative(a) == 0: + raise ZeroDivisionError("No converging solution found, zero derivative") + a -= f(a) / f_derivative(a) # Calculate next estimate + raise ArithmeticError("No converging solution found, iteration limit reached") if __name__ == "__main__": import doctest + from math import exp, tanh doctest.testmod() - # Find value of pi - print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") - # Find root of polynomial - print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}") - # Find value of e - print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}") - # Find root of exponential function - 
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}") + def func(x: float) -> float: + return tanh(x) ** 2 - exp(3 * x) + + solution, err, steps = newton_raphson( + func, x0=10, max_iter=100, step=1e-6, log_steps=True + ) + print(f"{solution=}, {err=}") + print("\n".join(str(x) for x in steps)) diff --git a/maths/numerical_analysis/newton_raphson_2.py b/maths/numerical_analysis/newton_raphson_2.py deleted file mode 100644 index f6b227b5c..000000000 --- a/maths/numerical_analysis/newton_raphson_2.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Author: P Shreyas Shetty -Implementation of Newton-Raphson method for solving equations of kind -f(x) = 0. It is an iterative method where solution is found by the expression - x[n+1] = x[n] + f(x[n])/f'(x[n]) -If no solution exists, then either the solution will not be found when iteration -limit is reached or the gradient f'(x[n]) approaches zero. In both cases, exception -is raised. If iteration limit is reached, try increasing maxiter. 
-""" - -import math as m -from collections.abc import Callable - -DerivativeFunc = Callable[[float], float] - - -def calc_derivative(f: DerivativeFunc, a: float, h: float = 0.001) -> float: - """ - Calculates derivative at point a for function f using finite difference - method - """ - return (f(a + h) - f(a - h)) / (2 * h) - - -def newton_raphson( - f: DerivativeFunc, - x0: float = 0, - maxiter: int = 100, - step: float = 0.0001, - maxerror: float = 1e-6, - logsteps: bool = False, -) -> tuple[float, float, list[float]]: - a = x0 # set the initial guess - steps = [a] - error = abs(f(a)) - f1 = lambda x: calc_derivative(f, x, h=step) # noqa: E731 Derivative of f(x) - for _ in range(maxiter): - if f1(a) == 0: - raise ValueError("No converging solution found") - a = a - f(a) / f1(a) # Calculate the next estimate - if logsteps: - steps.append(a) - if error < maxerror: - break - else: - raise ValueError("Iteration limit reached, no converging solution found") - if logsteps: - # If logstep is true, then log intermediate steps - return a, error, steps - return a, error, [] - - -if __name__ == "__main__": - from matplotlib import pyplot as plt - - f = lambda x: m.tanh(x) ** 2 - m.exp(3 * x) # noqa: E731 - solution, error, steps = newton_raphson( - f, x0=10, maxiter=1000, step=1e-6, logsteps=True - ) - plt.plot([abs(f(x)) for x in steps]) - plt.xlabel("step") - plt.ylabel("error") - plt.show() - print(f"solution = {{{solution:f}}}, error = {{{error:f}}}") diff --git a/maths/numerical_analysis/newton_raphson_new.py b/maths/numerical_analysis/newton_raphson_new.py deleted file mode 100644 index f61841e2e..000000000 --- a/maths/numerical_analysis/newton_raphson_new.py +++ /dev/null @@ -1,83 +0,0 @@ -# Implementing Newton Raphson method in Python -# Author: Saksham Gupta -# -# The Newton-Raphson method (also known as Newton's method) is a way to -# quickly find a good approximation for the root of a functreal-valued ion -# The method can also be extended to complex functions -# 
-# Newton's Method - https://en.wikipedia.org/wiki/Newton's_method - -from sympy import diff, lambdify, symbols -from sympy.functions import * # noqa: F403 - - -def newton_raphson( - function: str, - starting_point: complex, - variable: str = "x", - precision: float = 10**-10, - multiplicity: int = 1, -) -> complex: - """Finds root from the 'starting_point' onwards by Newton-Raphson method - Refer to https://docs.sympy.org/latest/modules/functions/index.html - for usable mathematical functions - - >>> newton_raphson("sin(x)", 2) - 3.141592653589793 - >>> newton_raphson("x**4 -5", 0.4 + 5j) - (-7.52316384526264e-37+1.4953487812212207j) - >>> newton_raphson('log(y) - 1', 2, variable='y') - 2.7182818284590455 - >>> newton_raphson('exp(x) - 1', 10, precision=0.005) - 1.2186556186174883e-10 - >>> newton_raphson('cos(x)', 0) - Traceback (most recent call last): - ... - ZeroDivisionError: Could not find root - """ - - x = symbols(variable) - func = lambdify(x, function) - diff_function = lambdify(x, diff(function, x)) - - prev_guess = starting_point - - while True: - if diff_function(prev_guess) != 0: - next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function( - prev_guess - ) - else: - raise ZeroDivisionError("Could not find root") from None - - # Precision is checked by comparing the difference of consecutive guesses - if abs(next_guess - prev_guess) < precision: - return next_guess - - prev_guess = next_guess - - -# Let's Execute -if __name__ == "__main__": - # Find root of trigonometric function - # Find value of pi - print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") - - # Find root of polynomial - # Find fourth Root of 5 - print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}") - - # Find value of e - print( - "The root of log(y) - 1 = 0 is ", - f"{newton_raphson('log(y) - 1', 2, variable='y')}", - ) - - # Exponential Roots - print( - "The root of exp(x) - 1 = 0 is", - f"{newton_raphson('exp(x) - 1', 10, 
precision=0.005)}", - ) - - # Find root of cos(x) - print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}") From 6971af2416af051b13f888bebdfefa222c89c15d Mon Sep 17 00:00:00 2001 From: Marek Mazij <112333347+Mrk-Mzj@users.noreply.github.com> Date: Tue, 24 Oct 2023 00:22:09 +0200 Subject: [PATCH 213/306] feat: RGB to CMYK color converter (#10741) * feat: code functional, commented, tested * fix: compering types, exception msg, line length * fix: type hints --- conversions/rgb_cmyk_conversion.py | 71 ++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 conversions/rgb_cmyk_conversion.py diff --git a/conversions/rgb_cmyk_conversion.py b/conversions/rgb_cmyk_conversion.py new file mode 100644 index 000000000..07d65b704 --- /dev/null +++ b/conversions/rgb_cmyk_conversion.py @@ -0,0 +1,71 @@ +def rgb_to_cmyk(r_input: int, g_input: int, b_input: int) -> tuple[int, int, int, int]: + """ + Simple RGB to CMYK conversion. Returns percentages of CMYK paint. + https://www.programmingalgorithms.com/algorithm/rgb-to-cmyk/ + + Note: this is a very popular algorithm that converts colors linearly and gives + only approximate results. Actual preparation for printing requires advanced color + conversion considering the color profiles and parameters of the target device. + + >>> rgb_to_cmyk(255, 200, "a") + Traceback (most recent call last): + ... + ValueError: Expected int, found (, , ) + + >>> rgb_to_cmyk(255, 255, 999) + Traceback (most recent call last): + ... 
+ ValueError: Expected int of the range 0..255 + + >>> rgb_to_cmyk(255, 255, 255) # white + (0, 0, 0, 0) + + >>> rgb_to_cmyk(128, 128, 128) # gray + (0, 0, 0, 50) + + >>> rgb_to_cmyk(0, 0, 0) # black + (0, 0, 0, 100) + + >>> rgb_to_cmyk(255, 0, 0) # red + (0, 100, 100, 0) + + >>> rgb_to_cmyk(0, 255, 0) # green + (100, 0, 100, 0) + + >>> rgb_to_cmyk(0, 0, 255) # blue + (100, 100, 0, 0) + """ + + if ( + not isinstance(r_input, int) + or not isinstance(g_input, int) + or not isinstance(b_input, int) + ): + msg = f"Expected int, found {type(r_input), type(g_input), type(b_input)}" + raise ValueError(msg) + + if not 0 <= r_input < 256 or not 0 <= g_input < 256 or not 0 <= b_input < 256: + raise ValueError("Expected int of the range 0..255") + + # changing range from 0..255 to 0..1 + r = r_input / 255 + g = g_input / 255 + b = b_input / 255 + + k = 1 - max(r, g, b) + + if k == 1: # pure black + return 0, 0, 0, 100 + + c = round(100 * (1 - r - k) / (1 - k)) + m = round(100 * (1 - g - k) / (1 - k)) + y = round(100 * (1 - b - k) / (1 - k)) + k = round(100 * k) + + return c, m, y, k + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 481aff7928b6a352c3cfa49045f0dd390d9d0868 Mon Sep 17 00:00:00 2001 From: Gourav Raj <59208847+gouravrajbit@users.noreply.github.com> Date: Tue, 24 Oct 2023 13:24:38 +0530 Subject: [PATCH 214/306] Add `Mirror a Binary Tree` solution (#9534) * Add `Invert a Binary Tree` solution * Add type * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add `doctest` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add `test` to `get_tree_inorder` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add `test` changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix lint errors * Fix precommit errors * Update and 
rename invert_binary_tree.py to mirror_binary_tree.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../binary_tree/mirror_binary_tree.py | 153 ++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 data_structures/binary_tree/mirror_binary_tree.py diff --git a/data_structures/binary_tree/mirror_binary_tree.py b/data_structures/binary_tree/mirror_binary_tree.py new file mode 100644 index 000000000..39305c2a9 --- /dev/null +++ b/data_structures/binary_tree/mirror_binary_tree.py @@ -0,0 +1,153 @@ +""" +Given the root of a binary tree, mirror the tree, and return its root. + +Leetcode problem reference: https://leetcode.com/problems/mirror-binary-tree/ +""" +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class Node: + """ + A Node has value variable and pointers to Nodes to its left and right. + """ + + value: int + left: Node | None = None + right: Node | None = None + + def __iter__(self) -> Iterator[int]: + if self.left: + yield from self.left + yield self.value + if self.right: + yield from self.right + + def __len__(self) -> int: + return sum(1 for _ in self) + + def mirror(self) -> Node: + """ + Mirror the binary tree rooted at this node by swapping left and right children. 
+ + >>> tree = Node(0) + >>> list(tree) + [0] + >>> list(tree.mirror()) + [0] + >>> tree = Node(1, Node(0), Node(3, Node(2), Node(4, None, Node(5)))) + >>> tuple(tree) + (0, 1, 2, 3, 4, 5) + >>> tuple(tree.mirror()) + (5, 4, 3, 2, 1, 0) + """ + self.left, self.right = self.right, self.left + if self.left: + self.left.mirror() + if self.right: + self.right.mirror() + return self + + +def make_tree_seven() -> Node: + r""" + Return a binary tree with 7 nodes that looks like this: + 1 + / \ + 2 3 + / \ / \ + 4 5 6 7 + + >>> tree_seven = make_tree_seven() + >>> len(tree_seven) + 7 + >>> list(tree_seven) + [4, 2, 5, 1, 6, 3, 7] + """ + tree = Node(1) + tree.left = Node(2) + tree.right = Node(3) + tree.left.left = Node(4) + tree.left.right = Node(5) + tree.right.left = Node(6) + tree.right.right = Node(7) + return tree + + +def make_tree_nine() -> Node: + r""" + Return a binary tree with 9 nodes that looks like this: + 1 + / \ + 2 3 + / \ \ + 4 5 6 + / \ \ + 7 8 9 + + >>> tree_nine = make_tree_nine() + >>> len(tree_nine) + 9 + >>> list(tree_nine) + [7, 4, 8, 2, 5, 9, 1, 3, 6] + """ + tree = Node(1) + tree.left = Node(2) + tree.right = Node(3) + tree.left.left = Node(4) + tree.left.right = Node(5) + tree.right.right = Node(6) + tree.left.left.left = Node(7) + tree.left.left.right = Node(8) + tree.left.right.right = Node(9) + return tree + + +def main() -> None: + r""" + Mirror binary trees with the given root and returns the root + + >>> tree = make_tree_nine() + >>> tuple(tree) + (7, 4, 8, 2, 5, 9, 1, 3, 6) + >>> tuple(tree.mirror()) + (6, 3, 1, 9, 5, 2, 8, 4, 7) + + nine_tree: + 1 + / \ + 2 3 + / \ \ + 4 5 6 + / \ \ + 7 8 9 + + The mirrored tree looks like this: + 1 + / \ + 3 2 + / / \ + 6 5 4 + / / \ + 9 8 7 + """ + trees = {"zero": Node(0), "seven": make_tree_seven(), "nine": make_tree_nine()} + for name, tree in trees.items(): + print(f" The {name} tree: {tuple(tree)}") + # (0,) + # (4, 2, 5, 1, 6, 3, 7) + # (7, 4, 8, 2, 5, 9, 1, 3, 6) + print(f"Mirror of {name} tree: 
{tuple(tree.mirror())}") + # (0,) + # (7, 3, 6, 1, 5, 2, 4) + # (6, 3, 1, 9, 5, 2, 8, 4, 7) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + main() From 17059b7ece0e9b2aa0f6e1789d635d6c3eef93ca Mon Sep 17 00:00:00 2001 From: Bhavesh Mathur <130584844+bhavesh1oo@users.noreply.github.com> Date: Tue, 24 Oct 2023 14:33:22 +0530 Subject: [PATCH 215/306] Added doctests , type hints for other/nested_brackets.py (#10872) * Added doctests , type hints * Update nested_brackets.py --------- Co-authored-by: Christian Clauss --- other/nested_brackets.py | 69 ++++++++++++++++++++++++++++------------ 1 file changed, 49 insertions(+), 20 deletions(-) diff --git a/other/nested_brackets.py b/other/nested_brackets.py index 19c6dd53c..5760fa29b 100644 --- a/other/nested_brackets.py +++ b/other/nested_brackets.py @@ -3,9 +3,9 @@ The nested brackets problem is a problem that determines if a sequence of brackets are properly nested. A sequence of brackets s is considered properly nested if any of the following conditions are true: - - s is empty - - s has the form (U) or [U] or {U} where U is a properly nested string - - s has the form VW where V and W are properly nested strings + - s is empty + - s has the form (U) or [U] or {U} where U is a properly nested string + - s has the form VW where V and W are properly nested strings For example, the string "()()[()]" is properly nested but "[(()]" is not. @@ -14,31 +14,60 @@ brackets and returns true if S is nested and false otherwise. 
""" -def is_balanced(s): - stack = [] - open_brackets = set({"(", "[", "{"}) - closed_brackets = set({")", "]", "}"}) +def is_balanced(s: str) -> bool: + """ + >>> is_balanced("") + True + >>> is_balanced("()") + True + >>> is_balanced("[]") + True + >>> is_balanced("{}") + True + >>> is_balanced("()[]{}") + True + >>> is_balanced("(())") + True + >>> is_balanced("[[") + False + >>> is_balanced("([{}])") + True + >>> is_balanced("(()[)]") + False + >>> is_balanced("([)]") + False + >>> is_balanced("[[()]]") + True + >>> is_balanced("(()(()))") + True + >>> is_balanced("]") + False + >>> is_balanced("Life is a bowl of cherries.") + True + >>> is_balanced("Life is a bowl of che{}ies.") + True + >>> is_balanced("Life is a bowl of che}{ies.") + False + """ open_to_closed = {"{": "}", "[": "]", "(": ")"} - - for i in range(len(s)): - if s[i] in open_brackets: - stack.append(s[i]) - - elif s[i] in closed_brackets and ( - len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i]) + stack = [] + for symbol in s: + if symbol in open_to_closed: + stack.append(symbol) + elif symbol in open_to_closed.values() and ( + not stack or open_to_closed[stack.pop()] != symbol ): return False - - return len(stack) == 0 + return not stack # stack should be empty def main(): s = input("Enter sequence of brackets: ") - if is_balanced(s): - print(s, "is balanced") - else: - print(s, "is not balanced") + print(f"'{s}' is {'' if is_balanced(s) else 'not '}balanced.") if __name__ == "__main__": + from doctest import testmod + + testmod() main() From eb17fcf8f5e77a6d3c870427db02b258515b4997 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 24 Oct 2023 14:45:36 +0200 Subject: [PATCH 216/306] Use dataclasses in circular_linked_list.py (#10884) * Use dataclasses in circular_linked_list.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 + .../linked_list/circular_linked_list.py | 49 
++++++++----------- 2 files changed, 23 insertions(+), 28 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index f0b1f7c13..5f8eabb6d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -157,6 +157,7 @@ * [Prefix Conversions](conversions/prefix_conversions.py) * [Prefix Conversions String](conversions/prefix_conversions_string.py) * [Pressure Conversions](conversions/pressure_conversions.py) + * [Rgb Cmyk Conversion](conversions/rgb_cmyk_conversion.py) * [Rgb Hsv Conversion](conversions/rgb_hsv_conversion.py) * [Roman Numerals](conversions/roman_numerals.py) * [Speed Conversions](conversions/speed_conversions.py) @@ -198,6 +199,7 @@ * [Lowest Common Ancestor](data_structures/binary_tree/lowest_common_ancestor.py) * [Maximum Fenwick Tree](data_structures/binary_tree/maximum_fenwick_tree.py) * [Merge Two Binary Trees](data_structures/binary_tree/merge_two_binary_trees.py) + * [Mirror Binary Tree](data_structures/binary_tree/mirror_binary_tree.py) * [Non Recursive Segment Tree](data_structures/binary_tree/non_recursive_segment_tree.py) * [Number Of Possible Binary Trees](data_structures/binary_tree/number_of_possible_binary_trees.py) * [Red Black Tree](data_structures/binary_tree/red_black_tree.py) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 54343c80a..bb64441d4 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -1,27 +1,20 @@ from __future__ import annotations from collections.abc import Iterator +from dataclasses import dataclass from typing import Any +@dataclass class Node: - def __init__(self, data: Any): - """ - Initialize a new Node with the given data. - Args: - data: The data to be stored in the node. 
- """ - self.data: Any = data - self.next: Node | None = None # Reference to the next node + data: Any + next_node: Node | None = None +@dataclass class CircularLinkedList: - def __init__(self) -> None: - """ - Initialize an empty Circular Linked List. - """ - self.head: Node | None = None # Reference to the head (first node) - self.tail: Node | None = None # Reference to the tail (last node) + head: Node | None = None # Reference to the head (first node) + tail: Node | None = None # Reference to the tail (last node) def __iter__(self) -> Iterator[Any]: """ @@ -32,7 +25,7 @@ class CircularLinkedList: node = self.head while node: yield node.data - node = node.next + node = node.next_node if node == self.head: break @@ -76,20 +69,20 @@ class CircularLinkedList: raise IndexError("list index out of range.") new_node: Node = Node(data) if self.head is None: - new_node.next = new_node # First node points to itself + new_node.next_node = new_node # First node points to itself self.tail = self.head = new_node elif index == 0: # Insert at the head - new_node.next = self.head + new_node.next_node = self.head assert self.tail is not None # List is not empty, tail exists - self.head = self.tail.next = new_node + self.head = self.tail.next_node = new_node else: temp: Node | None = self.head for _ in range(index - 1): assert temp is not None - temp = temp.next + temp = temp.next_node assert temp is not None - new_node.next = temp.next - temp.next = new_node + new_node.next_node = temp.next_node + temp.next_node = new_node if index == len(self) - 1: # Insert at the tail self.tail = new_node @@ -130,18 +123,18 @@ class CircularLinkedList: if self.head == self.tail: # Just one node self.head = self.tail = None elif index == 0: # Delete head node - assert self.tail.next is not None - self.tail.next = self.tail.next.next - self.head = self.head.next + assert self.tail.next_node is not None + self.tail.next_node = self.tail.next_node.next_node + self.head = self.head.next_node else: 
temp: Node | None = self.head for _ in range(index - 1): assert temp is not None - temp = temp.next + temp = temp.next_node assert temp is not None - assert temp.next is not None - delete_node = temp.next - temp.next = temp.next.next + assert temp.next_node is not None + delete_node = temp.next_node + temp.next_node = temp.next_node.next_node if index == len(self) - 1: # Delete at tail self.tail = temp return delete_node.data From a23dd7ecbea89be8f6b3c7fcf214425274db0d02 Mon Sep 17 00:00:00 2001 From: SEIKH NABAB UDDIN <93948993+nababuddin@users.noreply.github.com> Date: Tue, 24 Oct 2023 18:42:32 +0530 Subject: [PATCH 217/306] Change from only weatherstack to both (#10882) * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update current_weather.py * Update current_weather.py * Update current_weather.py * Update current_weather.py * import requests --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> 
Co-authored-by: Christian Clauss --- web_programming/current_weather.py | 60 ++++++++++++++++++++---------- 1 file changed, 40 insertions(+), 20 deletions(-) diff --git a/web_programming/current_weather.py b/web_programming/current_weather.py index 3ed4c8a95..3b6cd177c 100644 --- a/web_programming/current_weather.py +++ b/web_programming/current_weather.py @@ -1,30 +1,50 @@ import requests -APPID = "" # <-- Put your OpenWeatherMap appid here! -URL_BASE = "https://api.openweathermap.org/data/2.5/" +# Put your API key(s) here +OPENWEATHERMAP_API_KEY = "" +WEATHERSTACK_API_KEY = "" + +# Define the URL for the APIs with placeholders +OPENWEATHERMAP_URL_BASE = "https://api.openweathermap.org/data/2.5/weather" +WEATHERSTACK_URL_BASE = "http://api.weatherstack.com/current" -def current_weather(q: str = "Chicago", appid: str = APPID) -> dict: - """https://openweathermap.org/api""" - return requests.get(URL_BASE + "weather", params=locals()).json() - - -def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict: - """https://openweathermap.org/forecast5""" - return requests.get(URL_BASE + "forecast", params=locals()).json() - - -def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict: - """https://openweathermap.org/api/one-call-api""" - return requests.get(URL_BASE + "onecall", params=locals()).json() +def current_weather(location: str) -> list[dict]: + """ + >>> current_weather("location") + Traceback (most recent call last): + ... + ValueError: No API keys provided or no valid data returned. 
+ """ + weather_data = [] + if OPENWEATHERMAP_API_KEY: + params_openweathermap = {"q": location, "appid": OPENWEATHERMAP_API_KEY} + response_openweathermap = requests.get( + OPENWEATHERMAP_URL_BASE, params=params_openweathermap + ) + weather_data.append({"OpenWeatherMap": response_openweathermap.json()}) + if WEATHERSTACK_API_KEY: + params_weatherstack = {"query": location, "access_key": WEATHERSTACK_API_KEY} + response_weatherstack = requests.get( + WEATHERSTACK_URL_BASE, params=params_weatherstack + ) + weather_data.append({"Weatherstack": response_weatherstack.json()}) + if not weather_data: + raise ValueError("No API keys provided or no valid data returned.") + return weather_data if __name__ == "__main__": from pprint import pprint - while True: - location = input("Enter a location:").strip() + location = "to be determined..." + while location: + location = input("Enter a location (city name or latitude,longitude): ").strip() if location: - pprint(current_weather(location)) - else: - break + try: + weather_data = current_weather(location) + for forecast in weather_data: + pprint(forecast) + except ValueError as e: + print(repr(e)) + location = "" From 28f4c16132170bf1e00d414809aff0c31d043e22 Mon Sep 17 00:00:00 2001 From: Saptadeep Banerjee <69459134+imSanko@users.noreply.github.com> Date: Tue, 24 Oct 2023 19:16:00 +0530 Subject: [PATCH 218/306] Tried new TESTS for the binomial_coefficient (#10822) * Tried new TESTS for the binomial_coefficient * Fix the tests request * Update binomial_coefficient.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binomial_coefficient.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/binomial_coefficient.py | 46 +++++++++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/maths/binomial_coefficient.py b/maths/binomial_coefficient.py 
index 6d5b46cb5..24c54326e 100644 --- a/maths/binomial_coefficient.py +++ b/maths/binomial_coefficient.py @@ -1,10 +1,48 @@ def binomial_coefficient(n: int, r: int) -> int: """ - Find binomial coefficient using pascals triangle. + Find binomial coefficient using Pascal's triangle. + + Calculate C(n, r) using Pascal's triangle. + + :param n: The total number of items. + :param r: The number of items to choose. + :return: The binomial coefficient C(n, r). >>> binomial_coefficient(10, 5) 252 + >>> binomial_coefficient(10, 0) + 1 + >>> binomial_coefficient(0, 10) + 1 + >>> binomial_coefficient(10, 10) + 1 + >>> binomial_coefficient(5, 2) + 10 + >>> binomial_coefficient(5, 6) + 0 + >>> binomial_coefficient(3, 5) + 0 + >>> binomial_coefficient(-2, 3) + Traceback (most recent call last): + ... + ValueError: n and r must be non-negative integers + >>> binomial_coefficient(5, -1) + Traceback (most recent call last): + ... + ValueError: n and r must be non-negative integers + >>> binomial_coefficient(10.1, 5) + Traceback (most recent call last): + ... + TypeError: 'float' object cannot be interpreted as an integer + >>> binomial_coefficient(10, 5.1) + Traceback (most recent call last): + ... 
+ TypeError: 'float' object cannot be interpreted as an integer """ + if n < 0 or r < 0: + raise ValueError("n and r must be non-negative integers") + if 0 in (n, r): + return 1 c = [0 for i in range(r + 1)] # nc0 = 1 c[0] = 1 @@ -17,4 +55,8 @@ def binomial_coefficient(n: int, r: int) -> int: return c[r] -print(binomial_coefficient(n=10, r=5)) +if __name__ == "__main__": + from doctest import testmod + + testmod() + print(binomial_coefficient(n=10, r=5)) From aeee0f42a5684e42cb77b664570dd2d29e04b7c1 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Tue, 24 Oct 2023 20:06:24 +0530 Subject: [PATCH 219/306] Add doctests for fractional knapsack (#10891) * Add doctests for fractional knapsack * Update greedy_methods/fractional_knapsack.py Co-authored-by: Christian Clauss * Run doctests * Update greedy_methods/fractional_knapsack.py Co-authored-by: Christian Clauss * Update greedy_methods/fractional_knapsack.py --------- Co-authored-by: Christian Clauss --- greedy_methods/fractional_knapsack.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/greedy_methods/fractional_knapsack.py b/greedy_methods/fractional_knapsack.py index 58976d40c..d52b56f23 100644 --- a/greedy_methods/fractional_knapsack.py +++ b/greedy_methods/fractional_knapsack.py @@ -6,6 +6,30 @@ def frac_knapsack(vl, wt, w, n): """ >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) 240.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 10, 4) + 105.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 8, 4) + 95.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6], 8, 4) + 60.0 + >>> frac_knapsack([10, 40, 30], [5, 4, 6, 3], 8, 4) + 60.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 0, 4) + 0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 8, 0) + 95.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], -8, 4) + 0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 8, -4) + 95.0 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 
3], 800, 4) + 130 + >>> frac_knapsack([10, 40, 30, 50], [5, 4, 6, 3], 8, 400) + 95.0 + >>> frac_knapsack("ABCD", [5, 4, 6, 3], 8, 400) + Traceback (most recent call last): + ... + TypeError: unsupported operand type(s) for /: 'str' and 'int' """ r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True) From 28302db9417daf769bec3aface9016afabeb5133 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Tue, 24 Oct 2023 21:23:17 +0530 Subject: [PATCH 220/306] Remove myself from CODEOWNERS (#10220) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 05cd709a8..a0531cdee 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,7 +7,7 @@ # Order is important. The last matching pattern has the most precedence. -/.* @cclauss @dhruvmanila +/.* @cclauss # /arithmetic_analysis/ From fd227d802661d4be4babae66075542dc153b4569 Mon Sep 17 00:00:00 2001 From: Hardik Pawar <97388607+Hardvan@users.noreply.github.com> Date: Wed, 25 Oct 2023 03:05:38 +0530 Subject: [PATCH 221/306] Add function docstrings, comments and type hints (#10893) * Add function docstrings, comments and type hints * Fix type mismatch * Fix type hint error * Fix float to int error * Update ford_fulkerson.py * Update ford_fulkerson.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update ford_fulkerson.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- networking_flow/ford_fulkerson.py | 111 ++++++++++++++++++++++-------- 1 file changed, 81 insertions(+), 30 deletions(-) diff --git a/networking_flow/ford_fulkerson.py b/networking_flow/ford_fulkerson.py index 716ed508e..7d5fb522e 100644 --- a/networking_flow/ford_fulkerson.py +++ b/networking_flow/ford_fulkerson.py @@ -1,39 +1,95 @@ -# Ford-Fulkerson Algorithm for Maximum Flow Problem """ +Ford-Fulkerson Algorithm for Maximum 
Flow Problem +* https://en.wikipedia.org/wiki/Ford%E2%80%93Fulkerson_algorithm + Description: - (1) Start with initial flow as 0; - (2) Choose augmenting path from source to sink and add path to flow; + (1) Start with initial flow as 0 + (2) Choose the augmenting path from source to sink and add the path to flow """ +graph = [ + [0, 16, 13, 0, 0, 0], + [0, 0, 10, 12, 0, 0], + [0, 4, 0, 0, 14, 0], + [0, 0, 9, 0, 0, 20], + [0, 0, 0, 7, 0, 4], + [0, 0, 0, 0, 0, 0], +] -def bfs(graph, s, t, parent): - # Return True if there is node that has not iterated. - visited = [False] * len(graph) - queue = [] - queue.append(s) - visited[s] = True +def breadth_first_search(graph: list, source: int, sink: int, parents: list) -> bool: + """ + This function returns True if there is a node that has not iterated. + + Args: + graph: Adjacency matrix of graph + source: Source + sink: Sink + parents: Parent list + + Returns: + True if there is a node that has not iterated. + + >>> breadth_first_search(graph, 0, 5, [-1, -1, -1, -1, -1, -1]) + True + >>> breadth_first_search(graph, 0, 6, [-1, -1, -1, -1, -1, -1]) + Traceback (most recent call last): + ... 
+ IndexError: list index out of range + """ + visited = [False] * len(graph) # Mark all nodes as not visited + queue = [] # breadth-first search queue + + # Source node + queue.append(source) + visited[source] = True while queue: - u = queue.pop(0) - for ind in range(len(graph[u])): - if visited[ind] is False and graph[u][ind] > 0: + u = queue.pop(0) # Pop the front node + # Traverse all adjacent nodes of u + for ind, node in enumerate(graph[u]): + if visited[ind] is False and node > 0: queue.append(ind) visited[ind] = True - parent[ind] = u - - return visited[t] + parents[ind] = u + return visited[sink] -def ford_fulkerson(graph, source, sink): - # This array is filled by BFS and to store path +def ford_fulkerson(graph: list, source: int, sink: int) -> int: + """ + This function returns the maximum flow from source to sink in the given graph. + + CAUTION: This function changes the given graph. + + Args: + graph: Adjacency matrix of graph + source: Source + sink: Sink + + Returns: + Maximum flow + + >>> test_graph = [ + ... [0, 16, 13, 0, 0, 0], + ... [0, 0, 10, 12, 0, 0], + ... [0, 4, 0, 0, 14, 0], + ... [0, 0, 9, 0, 0, 20], + ... [0, 0, 0, 7, 0, 4], + ... [0, 0, 0, 0, 0, 0], + ... 
] + >>> ford_fulkerson(test_graph, 0, 5) + 23 + """ + # This array is filled by breadth-first search and to store path parent = [-1] * (len(graph)) max_flow = 0 - while bfs(graph, source, sink, parent): - path_flow = float("Inf") + + # While there is a path from source to sink + while breadth_first_search(graph, source, sink, parent): + path_flow = int(1e9) # Infinite value s = sink while s != source: - # Find the minimum value in select path + # Find the minimum value in the selected path path_flow = min(path_flow, graph[parent[s]][s]) s = parent[s] @@ -45,17 +101,12 @@ def ford_fulkerson(graph, source, sink): graph[u][v] -= path_flow graph[v][u] += path_flow v = parent[v] + return max_flow -graph = [ - [0, 16, 13, 0, 0, 0], - [0, 0, 10, 12, 0, 0], - [0, 4, 0, 0, 14, 0], - [0, 0, 9, 0, 0, 20], - [0, 0, 0, 7, 0, 4], - [0, 0, 0, 0, 0, 0], -] +if __name__ == "__main__": + from doctest import testmod -source, sink = 0, 5 -print(ford_fulkerson(graph, source, sink)) + testmod() + print(f"{ford_fulkerson(graph, source=0, sink=5) = }") From dab4e648965a92a7f73aa5fe6ad8b8afc0fde7f9 Mon Sep 17 00:00:00 2001 From: Bisma nadeem <130698042+Bisma-Nadeemm@users.noreply.github.com> Date: Wed, 25 Oct 2023 02:51:04 +0500 Subject: [PATCH 222/306] Code enhancements in binary_insertion_sort.py (#10918) * Code enhancements in binary_insertion_sort.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/binary_insertion_sort.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/sorts/binary_insertion_sort.py b/sorts/binary_insertion_sort.py index 8d4102558..50653a99e 100644 --- a/sorts/binary_insertion_sort.py +++ b/sorts/binary_insertion_sort.py @@ -12,10 +12,11 @@ python binary_insertion_sort.py def 
binary_insertion_sort(collection: list) -> list: - """Pure implementation of the binary insertion sort algorithm in Python - :param collection: some mutable ordered collection with heterogeneous - comparable items inside - :return: the same collection ordered by ascending + """ + Sorts a list using the binary insertion sort algorithm. + + :param collection: A mutable ordered collection with comparable items. + :return: The same collection ordered in ascending order. Examples: >>> binary_insertion_sort([0, 4, 1234, 4, 1]) @@ -39,23 +40,27 @@ def binary_insertion_sort(collection: list) -> list: n = len(collection) for i in range(1, n): - val = collection[i] + value_to_insert = collection[i] low = 0 high = i - 1 while low <= high: mid = (low + high) // 2 - if val < collection[mid]: + if value_to_insert < collection[mid]: high = mid - 1 else: low = mid + 1 for j in range(i, low, -1): collection[j] = collection[j - 1] - collection[low] = val + collection[low] = value_to_insert return collection -if __name__ == "__main__": +if __name__ == "__main": user_input = input("Enter numbers separated by a comma:\n").strip() - unsorted = [int(item) for item in user_input.split(",")] - print(binary_insertion_sort(unsorted)) + try: + unsorted = [int(item) for item in user_input.split(",")] + except ValueError: + print("Invalid input. 
Please enter valid integers separated by commas.") + raise + print(f"{binary_insertion_sort(unsorted) = }") From 76acc6de607eebdc0d0d5c68396030d8e240a6ea Mon Sep 17 00:00:00 2001 From: Iyiola Aloko <48067557+ialoko@users.noreply.github.com> Date: Tue, 24 Oct 2023 17:53:31 -0400 Subject: [PATCH 223/306] Adding doctests to frequency_finder.py (#10341) * Update frequency_finder.py * Update frequency_finder.py --------- Co-authored-by: Christian Clauss --- strings/frequency_finder.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/strings/frequency_finder.py b/strings/frequency_finder.py index 19f97afbb..8479c81ae 100644 --- a/strings/frequency_finder.py +++ b/strings/frequency_finder.py @@ -49,6 +49,15 @@ def get_item_at_index_zero(x: tuple) -> str: def get_frequency_order(message: str) -> str: + """ + Get the frequency order of the letters in the given string + >>> get_frequency_order('Hello World') + 'LOWDRHEZQXJKVBPYGFMUCSNIAT' + >>> get_frequency_order('Hello@') + 'LHOEZQXJKVBPYGFWMUCDRSNIAT' + >>> get_frequency_order('h') + 'HZQXJKVBPYGFWMUCLDRSNIOATE' + """ letter_to_freq = get_letter_count(message) freq_to_letter: dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() From c2c6cb0f5c46346cab99121d236b2f5748e3c1df Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 25 Oct 2023 22:28:23 +0200 Subject: [PATCH 224/306] Add dataclasses to binary_search_tree.py (#10920) --- .../binary_tree/binary_search_tree.py | 69 ++++++++++++++++--- 1 file changed, 60 insertions(+), 9 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index a706d21e3..38691c475 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -14,6 +14,16 @@ Example >>> t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7) >>> print(" ".join(repr(i.value) for i in t.traversal_tree())) 8 3 1 6 4 7 10 14 13 + +>>> tuple(i.value for i in 
t.traversal_tree(inorder)) +(1, 3, 4, 6, 7, 8, 10, 13, 14) +>>> tuple(t) +(1, 3, 4, 6, 7, 8, 10, 13, 14) +>>> t.find_kth_smallest(3, t.root) +4 +>>> tuple(t)[3-1] +4 + >>> print(" ".join(repr(i.value) for i in t.traversal_tree(postorder))) 1 4 7 6 3 13 14 10 8 >>> t.remove(20) @@ -39,8 +49,12 @@ Prints all the elements of the list in order traversal Test existence >>> t.search(6) is not None True +>>> 6 in t +True >>> t.search(-1) is not None False +>>> -1 in t +False >>> t.search(6).is_right True @@ -49,26 +63,47 @@ False >>> t.get_max().value 14 +>>> max(t) +14 >>> t.get_min().value 1 +>>> min(t) +1 >>> t.empty() False +>>> not t +False >>> for i in testlist: ... t.remove(i) >>> t.empty() True +>>> not t +True """ +from __future__ import annotations -from collections.abc import Iterable +from collections.abc import Iterable, Iterator +from dataclasses import dataclass from typing import Any +@dataclass class Node: - def __init__(self, value: int | None = None): - self.value = value - self.parent: Node | None = None # Added in order to delete a node easier - self.left: Node | None = None - self.right: Node | None = None + value: int + left: Node | None = None + right: Node | None = None + parent: Node | None = None # Added in order to delete a node easier + + def __iter__(self) -> Iterator[int]: + """ + >>> list(Node(0)) + [0] + >>> list(Node(0, Node(-1), Node(1), None)) + [-1, 0, 1] + """ + yield from self.left or [] + yield self.value + yield from self.right or [] def __repr__(self) -> str: from pprint import pformat @@ -79,12 +114,18 @@ class Node: @property def is_right(self) -> bool: - return self.parent is not None and self is self.parent.right + return bool(self.parent and self is self.parent.right) +@dataclass class BinarySearchTree: - def __init__(self, root: Node | None = None): - self.root = root + root: Node | None = None + + def __bool__(self) -> bool: + return bool(self.root) + + def __iter__(self) -> Iterator[int]: + yield from self.root or [] def 
__str__(self) -> str: """ @@ -227,6 +268,16 @@ class BinarySearchTree: return arr[k - 1] +def inorder(curr_node: Node | None) -> list[Node]: + """ + inorder (left, self, right) + """ + node_list = [] + if curr_node is not None: + node_list = inorder(curr_node.left) + [curr_node] + inorder(curr_node.right) + return node_list + + def postorder(curr_node: Node | None) -> list[Node]: """ postOrder (left, right, self) From 3d0a409ce119e1b7734ebaa2ffea660f5359080b Mon Sep 17 00:00:00 2001 From: AdityaAtoZ <129844626+AdityaAtoZ@users.noreply.github.com> Date: Thu, 26 Oct 2023 02:03:35 +0530 Subject: [PATCH 225/306] Improved Equilibrium Index of an Array. (#10899) * Improved Equilibrium Index of an Array. This is the modifications made to the original code: 1. Create Doctest Instructions: Python "doctest" can be executed by running the following command: python -m doctest -v equilibrium_index.py. 2. Deleted Argument {size}: Deleted the `size` argument because `len(arr)} allows the array's length to be determined inside the function, simplifying and improving the readability of the function signature. 3. Used {enumerate}: To improve code readability and indicate that we're working with element-index pairs, we iterated through the array using both elements and their indices using the `enumerate` function. 4. Optimized the Loop: To prevent pointless additions, the loop was improved by initializing {left_sum} with the value of the first element (arr[0]). Furthermore, since the beginning and last items (0 and size - 1) cannot be equilibrium indices, there is no need to check them, saving further computations. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../arrays/equilibrium_index_in_array.py | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/data_structures/arrays/equilibrium_index_in_array.py b/data_structures/arrays/equilibrium_index_in_array.py index 4099896d2..8802db620 100644 --- a/data_structures/arrays/equilibrium_index_in_array.py +++ b/data_structures/arrays/equilibrium_index_in_array.py @@ -2,7 +2,7 @@ Find the Equilibrium Index of an Array. Reference: https://www.geeksforgeeks.org/equilibrium-index-of-an-array/ -Python doctests can be run with the following command: +Python doctest can be run with the following command: python -m doctest -v equilibrium_index.py Given a sequence arr[] of size n, this function returns @@ -20,35 +20,34 @@ Output: 3 """ -def equilibrium_index(arr: list[int], size: int) -> int: +def equilibrium_index(arr: list[int]) -> int: """ Find the equilibrium index of an array. Args: - arr : The input array of integers. - size : The size of the array. + arr (list[int]): The input array of integers. Returns: int: The equilibrium index or -1 if no equilibrium index exists. 
Examples: - >>> equilibrium_index([-7, 1, 5, 2, -4, 3, 0], 7) + >>> equilibrium_index([-7, 1, 5, 2, -4, 3, 0]) 3 - >>> equilibrium_index([1, 2, 3, 4, 5], 5) + >>> equilibrium_index([1, 2, 3, 4, 5]) -1 - >>> equilibrium_index([1, 1, 1, 1, 1], 5) + >>> equilibrium_index([1, 1, 1, 1, 1]) 2 - >>> equilibrium_index([2, 4, 6, 8, 10, 3], 6) + >>> equilibrium_index([2, 4, 6, 8, 10, 3]) -1 """ total_sum = sum(arr) left_sum = 0 - for i in range(size): - total_sum -= arr[i] + for i, value in enumerate(arr): + total_sum -= value if left_sum == total_sum: return i - left_sum += arr[i] + left_sum += value return -1 From e1e5963812c3f59a60181307bccf15792ad2406c Mon Sep 17 00:00:00 2001 From: Bisma nadeem <130698042+Bisma-Nadeemm@users.noreply.github.com> Date: Thu, 26 Oct 2023 02:26:54 +0500 Subject: [PATCH 226/306] Code Enhancements in merge_sort.py (#10911) * Code Enhancements in merge_sort.py This enhanced code includes improved variable naming, error handling for user input, and more detailed docstrings. It's now more robust and readable. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- sorts/merge_sort.py | 47 ++++++++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 20 deletions(-) diff --git a/sorts/merge_sort.py b/sorts/merge_sort.py index e80b1cb22..0628b848b 100644 --- a/sorts/merge_sort.py +++ b/sorts/merge_sort.py @@ -12,9 +12,13 @@ python merge_sort.py def merge_sort(collection: list) -> list: """ - :param collection: some mutable ordered collection with heterogeneous - comparable items inside - :return: the same collection ordered by ascending + Sorts a list using the merge sort algorithm. + + :param collection: A mutable ordered collection with comparable items. + :return: The same collection ordered in ascending order. 
+ + Time Complexity: O(n log n) + Examples: >>> merge_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] @@ -26,31 +30,34 @@ def merge_sort(collection: list) -> list: def merge(left: list, right: list) -> list: """ - Merge left and right. + Merge two sorted lists into a single sorted list. - :param left: left collection - :param right: right collection - :return: merge result + :param left: Left collection + :param right: Right collection + :return: Merged result """ - - def _merge(): - while left and right: - yield (left if left[0] <= right[0] else right).pop(0) - yield from left - yield from right - - return list(_merge()) + result = [] + while left and right: + result.append(left.pop(0) if left[0] <= right[0] else right.pop(0)) + result.extend(left) + result.extend(right) + return result if len(collection) <= 1: return collection - mid = len(collection) // 2 - return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:])) + mid_index = len(collection) // 2 + return merge(merge_sort(collection[:mid_index]), merge_sort(collection[mid_index:])) if __name__ == "__main__": import doctest doctest.testmod() - user_input = input("Enter numbers separated by a comma:\n").strip() - unsorted = [int(item) for item in user_input.split(",")] - print(*merge_sort(unsorted), sep=",") + + try: + user_input = input("Enter numbers separated by a comma:\n").strip() + unsorted = [int(item) for item in user_input.split(",")] + sorted_list = merge_sort(unsorted) + print(*sorted_list, sep=",") + except ValueError: + print("Invalid input. 
Please enter valid integers separated by commas.") From 0ffe506ea79fcd9820a6c9bf3194a3bfcd677b57 Mon Sep 17 00:00:00 2001 From: Humzafazal72 <125209604+Humzafazal72@users.noreply.github.com> Date: Thu, 26 Oct 2023 04:05:35 +0500 Subject: [PATCH 227/306] added mean absolute percentage error (#10464) * added mean absolute percentage error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added mean_absolute_percentage_error * added mean_absolute_percentage_error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added mean_absolute_percentage_error * added mean_absolute_percentage_error * added mean absolute percentage error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added mean absolute percentage error * added mean absolute percentage error * added mean absolute percentage error * added mean absolute percentage error * added mean absolute percentage error * Update machine_learning/loss_functions.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- machine_learning/loss_functions.py | 45 ++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index ef3429636..e5b7a713b 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -297,6 +297,51 @@ def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> fl return np.mean(squared_logarithmic_errors) +def mean_absolute_percentage_error( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-15 +) -> float: + """ + Calculate the Mean Absolute Percentage Error between y_true and y_pred. + + Mean Absolute Percentage Error calculates the average of the absolute + percentage differences between the predicted and true values. 
+ + Formula = (Σ|y_true[i]-Y_pred[i]/y_true[i]|)/n + + Source: https://stephenallwright.com/good-mape-score/ + + Parameters: + y_true (np.ndarray): Numpy array containing true/target values. + y_pred (np.ndarray): Numpy array containing predicted values. + + Returns: + float: The Mean Absolute Percentage error between y_true and y_pred. + + Examples: + >>> y_true = np.array([10, 20, 30, 40]) + >>> y_pred = np.array([12, 18, 33, 45]) + >>> mean_absolute_percentage_error(y_true, y_pred) + 0.13125 + + >>> y_true = np.array([1, 2, 3, 4]) + >>> y_pred = np.array([2, 3, 4, 5]) + >>> mean_absolute_percentage_error(y_true, y_pred) + 0.5208333333333333 + + >>> y_true = np.array([34, 37, 44, 47, 48, 48, 46, 43, 32, 27, 26, 24]) + >>> y_pred = np.array([37, 40, 46, 44, 46, 50, 45, 44, 34, 30, 22, 23]) + >>> mean_absolute_percentage_error(y_true, y_pred) + 0.064671076436071 + """ + if len(y_true) != len(y_pred): + raise ValueError("The length of the two arrays should be the same.") + + y_true = np.where(y_true == 0, epsilon, y_true) + absolute_percentage_diff = np.abs((y_true - y_pred) / y_true) + + return np.mean(absolute_percentage_diff) + + if __name__ == "__main__": import doctest From 0e7f8284a32286534691e437d67405b6a09b10e1 Mon Sep 17 00:00:00 2001 From: Dale Dai <145884899+CouldNot@users.noreply.github.com> Date: Wed, 25 Oct 2023 22:27:46 -0700 Subject: [PATCH 228/306] Add error tests in doctest and fix error message (#10930) * Add error tests in doctest and fix error message * Change AssertationError to ValueError * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/prime_check.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/maths/prime_check.py b/maths/prime_check.py index c17877a57..f1bc4def2 100644 --- a/maths/prime_check.py +++ b/maths/prime_check.py @@ -29,12 +29,19 @@ 
def is_prime(number: int) -> bool: True >>> is_prime(67483) False + >>> is_prime(16.1) + Traceback (most recent call last): + ... + ValueError: is_prime() only accepts positive integers + >>> is_prime(-4) + Traceback (most recent call last): + ... + ValueError: is_prime() only accepts positive integers """ # precondition - assert isinstance(number, int) and ( - number >= 0 - ), "'number' must been an int and positive" + if not isinstance(number, int) or not number >= 0: + raise ValueError("is_prime() only accepts positive integers") if 1 < number < 4: # 2 and 3 are primes @@ -64,7 +71,7 @@ class Test(unittest.TestCase): assert is_prime(29) def test_not_primes(self): - with pytest.raises(AssertionError): + with pytest.raises(ValueError): is_prime(-19) assert not is_prime( 0 From 1a5d5cf93d30fc123af680ee9c58eb955932972b Mon Sep 17 00:00:00 2001 From: Megan Payne Date: Thu, 26 Oct 2023 07:31:47 +0200 Subject: [PATCH 229/306] Mean absolute error (#10927) * added mean absolute error to loss_functions.py * added doctest to mean absolute error to loss_functions.py * fixed long line in loss_functions.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed error in MAE * Update machine_learning/loss_functions.py Co-authored-by: Tianyi Zheng --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- machine_learning/loss_functions.py | 37 ++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index e5b7a713b..ea1f390e3 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -261,6 +261,43 @@ def mean_squared_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: return np.mean(squared_errors) +def mean_absolute_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: + """ + Calculates the Mean Absolute Error (MAE) 
between ground truth (observed) + and predicted values. + + MAE measures the absolute difference between true values and predicted values. + + Equation: + MAE = (1/n) * Σ(abs(y_true - y_pred)) + + Reference: https://en.wikipedia.org/wiki/Mean_absolute_error + + Parameters: + - y_true: The true values (ground truth) + - y_pred: The predicted values + + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> np.isclose(mean_absolute_error(true_values, predicted_values), 0.16) + True + >>> true_values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_values = np.array([0.8, 2.1, 2.9, 4.2, 5.2]) + >>> np.isclose(mean_absolute_error(true_values, predicted_values), 2.16) + False + >>> true_labels = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) + >>> predicted_probs = np.array([0.3, 0.8, 0.9, 5.2]) + >>> mean_absolute_error(true_labels, predicted_probs) + Traceback (most recent call last): + ... + ValueError: Input arrays must have the same length. 
+ """ + if len(y_true) != len(y_pred): + raise ValueError("Input arrays must have the same length.") + + return np.mean(abs(y_true - y_pred)) + + def mean_squared_logarithmic_error(y_true: np.ndarray, y_pred: np.ndarray) -> float: """ Calculate the mean squared logarithmic error (MSLE) between ground truth and From a8f05fe0a5d8b7e88d99c160b177ff3f3f07edcc Mon Sep 17 00:00:00 2001 From: Ed Date: Thu, 26 Oct 2023 00:02:35 -0700 Subject: [PATCH 230/306] Add doctests and type hints (#10974) * Add doctests and type hints * Apply suggestions from code review * Update tarjans_scc.py * Update tarjans_scc.py --------- Co-authored-by: Tianyi Zheng --- graphs/tarjans_scc.py | 35 +++++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/graphs/tarjans_scc.py b/graphs/tarjans_scc.py index dfd2e5270..a75dc4d2c 100644 --- a/graphs/tarjans_scc.py +++ b/graphs/tarjans_scc.py @@ -1,7 +1,7 @@ from collections import deque -def tarjan(g): +def tarjan(g: list[list[int]]) -> list[list[int]]: """ Tarjan's algo for finding strongly connected components in a directed graph @@ -19,15 +19,30 @@ def tarjan(g): Complexity: strong_connect() is called at most once for each node and has a complexity of O(|E|) as it is DFS. Therefore this has complexity O(|V| + |E|) for a graph G = (V, E) + + >>> tarjan([[2, 3, 4], [2, 3, 4], [0, 1, 3], [0, 1, 2], [1]]) + [[4, 3, 1, 2, 0]] + >>> tarjan([[], [], [], []]) + [[0], [1], [2], [3]] + >>> a = [0, 1, 2, 3, 4, 5, 4] + >>> b = [1, 0, 3, 2, 5, 4, 0] + >>> n = 7 + >>> sorted(tarjan(create_graph(n, list(zip(a, b))))) == sorted( + ... 
tarjan(create_graph(n, list(zip(a[::-1], b[::-1]))))) + True + >>> a = [0, 1, 2, 3, 4, 5, 6] + >>> b = [0, 1, 2, 3, 4, 5, 6] + >>> sorted(tarjan(create_graph(n, list(zip(a, b))))) + [[0], [1], [2], [3], [4], [5], [6]] """ n = len(g) - stack = deque() + stack: deque[int] = deque() on_stack = [False for _ in range(n)] index_of = [-1 for _ in range(n)] lowlink_of = index_of[:] - def strong_connect(v, index, components): + def strong_connect(v: int, index: int, components: list[list[int]]) -> int: index_of[v] = index # the number when this node is seen lowlink_of[v] = index # lowest rank node reachable from here index += 1 @@ -57,7 +72,7 @@ def tarjan(g): components.append(component) return index - components = [] + components: list[list[int]] = [] for v in range(n): if index_of[v] == -1: strong_connect(v, 0, components) @@ -65,8 +80,16 @@ def tarjan(g): return components -def create_graph(n, edges): - g = [[] for _ in range(n)] +def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]: + """ + >>> n = 7 + >>> source = [0, 0, 1, 2, 3, 3, 4, 4, 6] + >>> target = [1, 3, 2, 0, 1, 4, 5, 6, 5] + >>> edges = list(zip(source, target)) + >>> create_graph(n, edges) + [[1, 3], [2], [0], [1, 4], [5, 6], [], [5]] + """ + g: list[list[int]] = [[] for _ in range(n)] for u, v in edges: g[u].append(v) return g From c71c280726fb4e9487833993042e54598fe94fd9 Mon Sep 17 00:00:00 2001 From: Ravi Kumar <119737193+ravi-ivar-7@users.noreply.github.com> Date: Thu, 26 Oct 2023 12:50:28 +0530 Subject: [PATCH 231/306] added runge kutta gills method to maths/ numerical_analysis (#10967) * added runge kutta gills method * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- maths/numerical_analysis/runge_kutta_gills.py | 89 +++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 maths/numerical_analysis/runge_kutta_gills.py diff --git a/maths/numerical_analysis/runge_kutta_gills.py b/maths/numerical_analysis/runge_kutta_gills.py new file 
mode 100644 index 000000000..2bd9cd612 --- /dev/null +++ b/maths/numerical_analysis/runge_kutta_gills.py @@ -0,0 +1,89 @@ +""" +Use the Runge-Kutta-Gill's method of order 4 to solve Ordinary Differential Equations. + +https://www.geeksforgeeks.org/gills-4th-order-method-to-solve-differential-equations/ +Author : Ravi Kumar +""" +from collections.abc import Callable +from math import sqrt + +import numpy as np + + +def runge_kutta_gills( + func: Callable[[float, float], float], + x_initial: float, + y_initial: float, + step_size: float, + x_final: float, +) -> np.ndarray: + """ + Solve an Ordinary Differential Equations using Runge-Kutta-Gills Method of order 4. + + args: + func: An ordinary differential equation (ODE) as function of x and y. + x_initial: The initial value of x. + y_initial: The initial value of y. + step_size: The increment value of x. + x_final: The final value of x. + + Returns: + Solution of y at each nodal point + + >>> def f(x, y): + ... return (x-y)/2 + >>> y = runge_kutta_gills(f, 0, 3, 0.2, 5) + >>> y[-1] + 3.4104259225717537 + + >>> def f(x,y): + ... return x + >>> y = runge_kutta_gills(f, -1, 0, 0.2, 0) + >>> y + array([ 0. , -0.18, -0.32, -0.42, -0.48, -0.5 ]) + + >>> def f(x, y): + ... return x + y + >>> y = runge_kutta_gills(f, 0, 0, 0.2, -1) + Traceback (most recent call last): + ... + ValueError: The final value of x must be greater than initial value of x. + + >>> def f(x, y): + ... return x + >>> y = runge_kutta_gills(f, -1, 0, -0.2, 0) + Traceback (most recent call last): + ... + ValueError: Step size must be positive. + """ + if x_initial >= x_final: + raise ValueError( + "The final value of x must be greater than initial value of x." 
+ ) + + if step_size <= 0: + raise ValueError("Step size must be positive.") + + n = int((x_final - x_initial) / step_size) + y = np.zeros(n + 1) + y[0] = y_initial + for i in range(n): + k1 = step_size * func(x_initial, y[i]) + k2 = step_size * func(x_initial + step_size / 2, y[i] + k1 / 2) + k3 = step_size * func( + x_initial + step_size / 2, + y[i] + (-0.5 + 1 / sqrt(2)) * k1 + (1 - 1 / sqrt(2)) * k2, + ) + k4 = step_size * func( + x_initial + step_size, y[i] - (1 / sqrt(2)) * k2 + (1 + 1 / sqrt(2)) * k3 + ) + + y[i + 1] = y[i] + (k1 + (2 - sqrt(2)) * k2 + (2 + sqrt(2)) * k3 + k4) / 6 + x_initial += step_size + return y + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From dd7d18d49e9edc635f692b1f3db933e8ea717023 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Thu, 26 Oct 2023 13:25:56 +0530 Subject: [PATCH 232/306] Added doctest, docstring and typehint for sigmoid_function & cost_function (#10828) * Added doctest for sigmoid_function & cost_function * Update logistic_regression.py * Update logistic_regression.py * Minor formatting changes in doctests * Apply suggestions from code review * Made requested changes in logistic_regression.py * Apply suggestions from code review --------- Co-authored-by: Tianyi Zheng --- machine_learning/logistic_regression.py | 60 ++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/machine_learning/logistic_regression.py b/machine_learning/logistic_regression.py index f9da0104a..59a70fd65 100644 --- a/machine_learning/logistic_regression.py +++ b/machine_learning/logistic_regression.py @@ -27,7 +27,7 @@ from sklearn import datasets # classification problems -def sigmoid_function(z): +def sigmoid_function(z: float | np.ndarray) -> float | np.ndarray: """ Also known as Logistic Function. 
@@ -42,11 +42,63 @@ def sigmoid_function(z): @param z: input to the function @returns: returns value in the range 0 to 1 + + Examples: + >>> sigmoid_function(4) + 0.9820137900379085 + >>> sigmoid_function(np.array([-3, 3])) + array([0.04742587, 0.95257413]) + >>> sigmoid_function(np.array([-3, 3, 1])) + array([0.04742587, 0.95257413, 0.73105858]) + >>> sigmoid_function(np.array([-0.01, -2, -1.9])) + array([0.49750002, 0.11920292, 0.13010847]) + >>> sigmoid_function(np.array([-1.3, 5.3, 12])) + array([0.21416502, 0.9950332 , 0.99999386]) + >>> sigmoid_function(np.array([0.01, 0.02, 4.1])) + array([0.50249998, 0.50499983, 0.9836975 ]) + >>> sigmoid_function(np.array([0.8])) + array([0.68997448]) """ return 1 / (1 + np.exp(-z)) -def cost_function(h, y): +def cost_function(h: np.ndarray, y: np.ndarray) -> float: + """ + Cost function quantifies the error between predicted and expected values. + The cost function used in Logistic Regression is called Log Loss + or Cross Entropy Function. + + J(θ) = (1/m) * Σ [ -y * log(hθ(x)) - (1 - y) * log(1 - hθ(x)) ] + + Where: + - J(θ) is the cost that we want to minimize during training + - m is the number of training examples + - Σ represents the summation over all training examples + - y is the actual binary label (0 or 1) for a given example + - hθ(x) is the predicted probability that x belongs to the positive class + + @param h: the output of sigmoid function. 
It is the estimated probability + that the input example 'x' belongs to the positive class + + @param y: the actual binary label associated with input example 'x' + + Examples: + >>> estimations = sigmoid_function(np.array([0.3, -4.3, 8.1])) + >>> cost_function(h=estimations,y=np.array([1, 0, 1])) + 0.18937868932131605 + >>> estimations = sigmoid_function(np.array([4, 3, 1])) + >>> cost_function(h=estimations,y=np.array([1, 0, 0])) + 1.459999655669926 + >>> estimations = sigmoid_function(np.array([4, -3, -1])) + >>> cost_function(h=estimations,y=np.array([1,0,0])) + 0.1266663223365915 + >>> estimations = sigmoid_function(0) + >>> cost_function(h=estimations,y=np.array([1])) + 0.6931471805599453 + + References: + - https://en.wikipedia.org/wiki/Logistic_regression + """ return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean() @@ -75,6 +127,10 @@ def logistic_reg(alpha, x, y, max_iterations=70000): # In[68]: if __name__ == "__main__": + import doctest + + doctest.testmod() + iris = datasets.load_iris() x = iris.data[:, :2] y = (iris.target != 0) * 1 From e5a6a97c3277fbf849b77d1328720782128ecafd Mon Sep 17 00:00:00 2001 From: Sanjay <146640686+san-jay-14@users.noreply.github.com> Date: Thu, 26 Oct 2023 13:28:40 +0530 Subject: [PATCH 233/306] Added Lens formulae to the Physics repository (#10187) * Added Lens formulae to the Physics repository * Resolved the commented issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update lens_formulae.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- physics/lens_formulae.py | 131 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 100644 physics/lens_formulae.py diff --git a/physics/lens_formulae.py b/physics/lens_formulae.py new file mode 100644 index 000000000..162f3a8f3 --- /dev/null +++ b/physics/lens_formulae.py @@ -0,0 +1,131 @@ +""" +This module 
has functions which calculate focal length of lens, distance of +image from the lens and distance of object from the lens. +The above is calculated using the lens formula. + +In optics, the relationship between the distance of the image (v), +the distance of the object (u), and +the focal length (f) of the lens is given by the formula known as the Lens formula. +The Lens formula is applicable for convex as well as concave lenses. The formula +is given as follows: + +------------------- +| 1/f = 1/v + 1/u | +------------------- + +Where + f = focal length of the lens in meters. + v = distance of the image from the lens in meters. + u = distance of the object from the lens in meters. + +To make our calculations easy few assumptions are made while deriving the formula +which are important to keep in mind before solving this equation. +The assumptions are as follows: + 1. The object O is a point object lying somewhere on the principle axis. + 2. The lens is thin. + 3. The aperture of the lens taken must be small. + 4. The angles of incidence and angle of refraction should be small. + +Sign convention is a set of rules to set signs for image distance, object distance, +focal length, etc +for mathematical analysis of image formation. According to it: + 1. Object is always placed to the left of lens. + 2. All distances are measured from the optical centre of the mirror. + 3. Distances measured in the direction of the incident ray are positive and + the distances measured in the direction opposite + to that of the incident rays are negative. + 4. Distances measured along y-axis above the principal axis are positive and + that measured along y-axis below the principal + axis are negative. + +Note: Sign convention can be reversed and will still give the correct results. 
+ +Reference for Sign convention: +https://www.toppr.com/ask/content/concept/sign-convention-for-lenses-210246/ + +Reference for assumptions: +https://testbook.com/physics/derivation-of-lens-maker-formula +""" + + +def focal_length_of_lens( + object_distance_from_lens: float, image_distance_from_lens: float +) -> float: + """ + Doctests: + >>> from math import isclose + >>> isclose(focal_length_of_lens(10,4), 6.666666666666667) + True + >>> from math import isclose + >>> isclose(focal_length_of_lens(2.7,5.8), -5.0516129032258075) + True + >>> focal_length_of_lens(0, 20) # doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. + """ + + if object_distance_from_lens == 0 or image_distance_from_lens == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." + ) + focal_length = 1 / ( + (1 / image_distance_from_lens) - (1 / object_distance_from_lens) + ) + return focal_length + + +def object_distance( + focal_length_of_lens: float, image_distance_from_lens: float +) -> float: + """ + Doctests: + >>> from math import isclose + >>> isclose(object_distance(10,40), -13.333333333333332) + True + + >>> from math import isclose + >>> isclose(object_distance(6.2,1.5), 1.9787234042553192) + True + + >>> object_distance(0, 20) # doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. + """ + + if image_distance_from_lens == 0 or focal_length_of_lens == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." 
+ ) + + object_distance = 1 / ((1 / image_distance_from_lens) - (1 / focal_length_of_lens)) + return object_distance + + +def image_distance( + focal_length_of_lens: float, object_distance_from_lens: float +) -> float: + """ + Doctests: + >>> from math import isclose + >>> isclose(image_distance(50,40), 22.22222222222222) + True + >>> from math import isclose + >>> isclose(image_distance(5.3,7.9), 3.1719696969696973) + True + + >>> object_distance(0, 20) # doctest: +NORMALIZE_WHITESPACE + Traceback (most recent call last): + ... + ValueError: Invalid inputs. Enter non zero values with respect + to the sign convention. + """ + if object_distance_from_lens == 0 or focal_length_of_lens == 0: + raise ValueError( + "Invalid inputs. Enter non zero values with respect to the sign convention." + ) + image_distance = 1 / ((1 / object_distance_from_lens) + (1 / focal_length_of_lens)) + return image_distance From e791a2067baf3b23c0413f32c7388e3b2a95744e Mon Sep 17 00:00:00 2001 From: Mary-0165 <146911989+Mary-0165@users.noreply.github.com> Date: Thu, 26 Oct 2023 13:40:13 +0530 Subject: [PATCH 234/306] Capacitor equivalence algorithm (#9814) * capacitor equivalence algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Apply suggestions from code review * Update capacitor_equivalence.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Tianyi Zheng --- electronics/capacitor_equivalence.py | 53 ++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 electronics/capacitor_equivalence.py diff --git a/electronics/capacitor_equivalence.py b/electronics/capacitor_equivalence.py new file mode 100644 index 000000000..274b18afb --- /dev/null +++ b/electronics/capacitor_equivalence.py @@ -0,0 +1,53 @@ +# https://farside.ph.utexas.edu/teaching/316/lectures/node46.html + +from __future__ import annotations + + +def 
capacitor_parallel(capacitors: list[float]) -> float: + """ + Ceq = C1 + C2 + ... + Cn + Calculate the equivalent capacitance for any number of capacitors in parallel. + >>> capacitor_parallel([5.71389, 12, 3]) + 20.71389 + >>> capacitor_parallel([5.71389, 12, -3]) + Traceback (most recent call last): + ... + ValueError: Capacitor at index 2 has a negative value! + """ + sum_c = 0.0 + for index, capacitor in enumerate(capacitors): + if capacitor < 0: + msg = f"Capacitor at index {index} has a negative value!" + raise ValueError(msg) + sum_c += capacitor + return sum_c + + +def capacitor_series(capacitors: list[float]) -> float: + """ + Ceq = 1/ (1/C1 + 1/C2 + ... + 1/Cn) + >>> capacitor_series([5.71389, 12, 3]) + 1.6901062252507735 + >>> capacitor_series([5.71389, 12, -3]) + Traceback (most recent call last): + ... + ValueError: Capacitor at index 2 has a negative or zero value! + >>> capacitor_series([5.71389, 12, 0.000]) + Traceback (most recent call last): + ... + ValueError: Capacitor at index 2 has a negative or zero value! + """ + + first_sum = 0.0 + for index, capacitor in enumerate(capacitors): + if capacitor <= 0: + msg = f"Capacitor at index {index} has a negative or zero value!" 
+ raise ValueError(msg) + first_sum += 1 / capacitor + return 1 / first_sum + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From ade2837e410ec286819f0f4fd977bb411a95b379 Mon Sep 17 00:00:00 2001 From: Saurabh Mahapatra <98408932+its-100rabh@users.noreply.github.com> Date: Thu, 26 Oct 2023 13:55:08 +0530 Subject: [PATCH 235/306] Update capitalize.py (#10573) * Update capitalize.py * Update strings/capitalize.py --------- Co-authored-by: Tianyi Zheng --- strings/capitalize.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/strings/capitalize.py b/strings/capitalize.py index e7e97c2be..c0b45e0d9 100644 --- a/strings/capitalize.py +++ b/strings/capitalize.py @@ -3,7 +3,8 @@ from string import ascii_lowercase, ascii_uppercase def capitalize(sentence: str) -> str: """ - This function will capitalize the first letter of a sentence or a word + Capitalizes the first letter of a sentence or word. + >>> capitalize("hello world") 'Hello world' >>> capitalize("123 hello world") @@ -17,6 +18,10 @@ def capitalize(sentence: str) -> str: """ if not sentence: return "" + + # Create a dictionary that maps lowercase letters to uppercase letters + # Capitalize the first character if it's a lowercase letter + # Concatenate the capitalized character with the rest of the string lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase)) return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:] From 6497917352c73371730e50f063acd61cf4268076 Mon Sep 17 00:00:00 2001 From: Neha <129765919+neha3423@users.noreply.github.com> Date: Thu, 26 Oct 2023 14:24:30 +0530 Subject: [PATCH 236/306] Added Kth largest element algorithm (#10687) * neha3423 * neha3423 * neha3423 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * neha3423 * neha3423 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * neha323 * [pre-commit.ci] auto fixes from 
pre-commit.com hooks for more information, see https://pre-commit.ci * neha3423 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * neha3423 * neha3423 * neha3423 * neha3423 * Added test case for tuple * Update kth_largest_element.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/arrays/kth_largest_element.py | 117 ++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100644 data_structures/arrays/kth_largest_element.py diff --git a/data_structures/arrays/kth_largest_element.py b/data_structures/arrays/kth_largest_element.py new file mode 100644 index 000000000..f25cc68e9 --- /dev/null +++ b/data_structures/arrays/kth_largest_element.py @@ -0,0 +1,117 @@ +""" +Given an array of integers and an integer k, find the kth largest element in the array. + +https://stackoverflow.com/questions/251781 +""" + + +def partition(arr: list[int], low: int, high: int) -> int: + """ + Partitions list based on the pivot element. + + This function rearranges the elements in the input list 'arr' such that + all elements greater than or equal to the chosen pivot are on the left side + of the pivot, and all elements smaller than the pivot are on the right side. 
+ + Args: + arr: The list to be partitioned + low: The lower index of the list + high: The higher index of the list + + Returns: + int: The index of pivot element after partitioning + + Examples: + >>> partition([3, 1, 4, 5, 9, 2, 6, 5, 3, 5], 0, 9) + 4 + >>> partition([7, 1, 4, 5, 9, 2, 6, 5, 8], 0, 8) + 1 + >>> partition(['apple', 'cherry', 'date', 'banana'], 0, 3) + 2 + >>> partition([3.1, 1.2, 5.6, 4.7], 0, 3) + 1 + """ + pivot = arr[high] + i = low - 1 + for j in range(low, high): + if arr[j] >= pivot: + i += 1 + arr[i], arr[j] = arr[j], arr[i] + arr[i + 1], arr[high] = arr[high], arr[i + 1] + return i + 1 + + +def kth_largest_element(arr: list[int], position: int) -> int: + """ + Finds the kth largest element in a list. + Should deliver similar results to: + ```python + def kth_largest_element(arr, position): + return sorted(arr)[-position] + ``` + + Args: + arr: The list of numbers. + position: The position of the desired kth largest element. + + Returns: + int: The kth largest element. + + Examples: + >>> kth_largest_element([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5], 3) + 5 + >>> kth_largest_element([2, 5, 6, 1, 9, 3, 8, 4, 7, 3, 5], 1) + 9 + >>> kth_largest_element([2, 5, 6, 1, 9, 3, 8, 4, 7, 3, 5], -2) + Traceback (most recent call last): + ... + ValueError: Invalid value of 'position' + >>> kth_largest_element([9, 1, 3, 6, 7, 9, 8, 4, 2, 4, 9], 110) + Traceback (most recent call last): + ... + ValueError: Invalid value of 'position' + >>> kth_largest_element([1, 2, 4, 3, 5, 9, 7, 6, 5, 9, 3], 0) + Traceback (most recent call last): + ... + ValueError: Invalid value of 'position' + >>> kth_largest_element(['apple', 'cherry', 'date', 'banana'], 2) + 'cherry' + >>> kth_largest_element([3.1, 1.2, 5.6, 4.7,7.9,5,0], 2) + 5.6 + >>> kth_largest_element([-2, -5, -4, -1], 1) + -1 + >>> kth_largest_element([], 1) + -1 + >>> kth_largest_element([3.1, 1.2, 5.6, 4.7, 7.9, 5, 0], 1.5) + Traceback (most recent call last): + ... 
+ ValueError: The position should be an integer + >>> kth_largest_element((4, 6, 1, 2), 4) + Traceback (most recent call last): + ... + TypeError: 'tuple' object does not support item assignment + """ + if not arr: + return -1 + if not isinstance(position, int): + raise ValueError("The position should be an integer") + if not 1 <= position <= len(arr): + raise ValueError("Invalid value of 'position'") + low, high = 0, len(arr) - 1 + while low <= high: + if low > len(arr) - 1 or high < 0: + return -1 + pivot_index = partition(arr, low, high) + if pivot_index == position - 1: + return arr[pivot_index] + elif pivot_index > position - 1: + high = pivot_index - 1 + else: + low = pivot_index + 1 + return -1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 42c49ee1174506dd04dc2dff422328cdb7dc7201 Mon Sep 17 00:00:00 2001 From: Habip Akyol <127725897+habipakyol@users.noreply.github.com> Date: Thu, 26 Oct 2023 14:24:17 +0300 Subject: [PATCH 237/306] Fix typo in haralick_descriptors.py (#10988) --- computer_vision/haralick_descriptors.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/computer_vision/haralick_descriptors.py b/computer_vision/haralick_descriptors.py index 413cea304..007421e34 100644 --- a/computer_vision/haralick_descriptors.py +++ b/computer_vision/haralick_descriptors.py @@ -253,13 +253,13 @@ def matrix_concurrency(image: np.ndarray, coordinate: tuple[int, int]) -> np.nda def haralick_descriptors(matrix: np.ndarray) -> list[float]: - """Calculates all 8 Haralick descriptors based on co-occurence input matrix. + """Calculates all 8 Haralick descriptors based on co-occurrence input matrix. All descriptors are as follows: Maximum probability, Inverse Difference, Homogeneity, Entropy, Energy, Dissimilarity, Contrast and Correlation Args: - matrix: Co-occurence matrix to use as base for calculating descriptors. + matrix: Co-occurrence matrix to use as base for calculating descriptors. 
Returns: Reverse ordered list of resulting descriptors From 29b8ccdc2f685e815f12fd6e9e8b9faee21e338d Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Thu, 26 Oct 2023 17:42:28 +0530 Subject: [PATCH 238/306] Added doctest to hash_table.py (#10984) --- data_structures/hashing/hash_table.py | 81 +++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index 7ca2f7c40..5bf431328 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -21,6 +21,29 @@ class HashTable: self._keys: dict = {} def keys(self): + """ + The keys function returns a dictionary containing the key value pairs. + key being the index number in hash table and value being the data value. + + Examples: + 1. creating HashTable with size 10 and inserting 3 elements + >>> ht = HashTable(10) + >>> ht.insert_data(10) + >>> ht.insert_data(20) + >>> ht.insert_data(30) + >>> ht.keys() + {0: 10, 1: 20, 2: 30} + + 2. creating HashTable with size 5 and inserting 5 elements + >>> ht = HashTable(5) + >>> ht.insert_data(5) + >>> ht.insert_data(4) + >>> ht.insert_data(3) + >>> ht.insert_data(2) + >>> ht.insert_data(1) + >>> ht.keys() + {0: 5, 4: 4, 3: 3, 2: 2, 1: 1} + """ return self._keys def balanced_factor(self): @@ -37,6 +60,43 @@ class HashTable: print(self.values) def bulk_insert(self, values): + """ + bulk_insert is used for entering more than one element at a time + in the HashTable. + + Examples: + 1. + >>> ht = HashTable(5) + >>> ht.bulk_insert((10,20,30)) + step 1 + [0, 1, 2, 3, 4] + [10, None, None, None, None] + step 2 + [0, 1, 2, 3, 4] + [10, 20, None, None, None] + step 3 + [0, 1, 2, 3, 4] + [10, 20, 30, None, None] + + 2. 
+ >>> ht = HashTable(5) + >>> ht.bulk_insert([5,4,3,2,1]) + step 1 + [0, 1, 2, 3, 4] + [5, None, None, None, None] + step 2 + [0, 1, 2, 3, 4] + [5, None, None, None, 4] + step 3 + [0, 1, 2, 3, 4] + [5, None, None, 3, 4] + step 4 + [0, 1, 2, 3, 4] + [5, None, 2, 3, 4] + step 5 + [0, 1, 2, 3, 4] + [5, 1, 2, 3, 4] + """ i = 1 self.__aux_list = values for value in values: @@ -69,6 +129,21 @@ class HashTable: self.insert_data(value) def insert_data(self, data): + """ + insert_data is used for inserting a single element at a time in the HashTable. + + Examples: + + >>> ht = HashTable(3) + >>> ht.insert_data(5) + >>> ht.keys() + {2: 5} + >>> ht = HashTable(5) + >>> ht.insert_data(30) + >>> ht.insert_data(50) + >>> ht.keys() + {0: 30, 1: 50} + """ key = self.hash_function(data) if self.values[key] is None: @@ -84,3 +159,9 @@ class HashTable: else: self.rehashing() self.insert_data(data) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 69f7f3208e0297cea9ccd9d02b9fb690f2ee3b93 Mon Sep 17 00:00:00 2001 From: Akash_Jambulkar <97665573+Akash-Jambulkar@users.noreply.github.com> Date: Thu, 26 Oct 2023 17:57:31 +0530 Subject: [PATCH 239/306] Update cocktail_shaker_sort.py (#10987) * Update cocktail_shaker_sort.py Added a docstring with clear explanations of the function and its parameters. Changed variable names i, start, and end for better readability. Improved comments to describe the purpose of each section of the algorithm. Adjusted the loop ranges to make the code more concise and readable. Removed redundant comments and variable assignments. Provided a clear message when printing the sorted list. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update cocktail_shaker_sort.py * typing: ignore[operator] * Update cocktail_shaker_sort.py * Update cocktail_shaker_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/cocktail_shaker_sort.py | 56 ++++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 17 deletions(-) diff --git a/sorts/cocktail_shaker_sort.py b/sorts/cocktail_shaker_sort.py index b738ff31d..de126426d 100644 --- a/sorts/cocktail_shaker_sort.py +++ b/sorts/cocktail_shaker_sort.py @@ -1,40 +1,62 @@ -""" https://en.wikipedia.org/wiki/Cocktail_shaker_sort """ +""" +An implementation of the cocktail shaker sort algorithm in pure Python. + +https://en.wikipedia.org/wiki/Cocktail_shaker_sort +""" -def cocktail_shaker_sort(unsorted: list) -> list: +def cocktail_shaker_sort(arr: list[int]) -> list[int]: """ - Pure implementation of the cocktail shaker sort algorithm in Python. + Sorts a list using the Cocktail Shaker Sort algorithm. + + :param arr: List of elements to be sorted. + :return: Sorted list. + >>> cocktail_shaker_sort([4, 5, 2, 1, 2]) [1, 2, 2, 4, 5] - >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) [-4, 0, 1, 2, 5, 11] - >>> cocktail_shaker_sort([0.1, -2.4, 4.4, 2.2]) [-2.4, 0.1, 2.2, 4.4] - >>> cocktail_shaker_sort([1, 2, 3, 4, 5]) [1, 2, 3, 4, 5] - >>> cocktail_shaker_sort([-4, -5, -24, -7, -11]) [-24, -11, -7, -5, -4] + >>> cocktail_shaker_sort(["elderberry", "banana", "date", "apple", "cherry"]) + ['apple', 'banana', 'cherry', 'date', 'elderberry'] + >>> cocktail_shaker_sort((-4, -5, -24, -7, -11)) + Traceback (most recent call last): + ... 
+ TypeError: 'tuple' object does not support item assignment """ - for i in range(len(unsorted) - 1, 0, -1): + start, end = 0, len(arr) - 1 + + while start < end: swapped = False - for j in range(i, 0, -1): - if unsorted[j] < unsorted[j - 1]: - unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j] - swapped = True - - for j in range(i): - if unsorted[j] > unsorted[j + 1]: - unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j] + # Pass from left to right + for i in range(start, end): + if arr[i] > arr[i + 1]: + arr[i], arr[i + 1] = arr[i + 1], arr[i] swapped = True if not swapped: break - return unsorted + + end -= 1 # Decrease the end pointer after each pass + + # Pass from right to left + for i in range(end, start, -1): + if arr[i] < arr[i - 1]: + arr[i], arr[i - 1] = arr[i - 1], arr[i] + swapped = True + + if not swapped: + break + + start += 1 # Increase the start pointer after each pass + + return arr if __name__ == "__main__": From 579250363db1975440c75f4f6d486b88ff568cdb Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Thu, 26 Oct 2023 08:36:53 -0400 Subject: [PATCH 240/306] Speed up `dijkstra_bankers_algorithm.py` (#10861) * updating DIRECTORY.md * Rename dijkstra_bankers_algorithm.py * Remove sleep() call * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- other/{dijkstra_bankers_algorithm.py => bankers_algorithm.py} | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) rename other/{dijkstra_bankers_algorithm.py => bankers_algorithm.py} (99%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 5f8eabb6d..d108acf8d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -774,8 +774,8 @@ ## Other * [Activity Selection](other/activity_selection.py) * [Alternative List Arrange](other/alternative_list_arrange.py) + * [Bankers Algorithm](other/bankers_algorithm.py) * [Davis Putnam Logemann Loveland](other/davis_putnam_logemann_loveland.py) - * [Dijkstra Bankers 
Algorithm](other/dijkstra_bankers_algorithm.py) * [Doomsday](other/doomsday.py) * [Fischer Yates Shuffle](other/fischer_yates_shuffle.py) * [Gauss Easter](other/gauss_easter.py) diff --git a/other/dijkstra_bankers_algorithm.py b/other/bankers_algorithm.py similarity index 99% rename from other/dijkstra_bankers_algorithm.py rename to other/bankers_algorithm.py index be7bceba1..858eb0b2c 100644 --- a/other/dijkstra_bankers_algorithm.py +++ b/other/bankers_algorithm.py @@ -17,8 +17,6 @@ before deciding whether allocation should be allowed to continue. from __future__ import annotations -import time - import numpy as np test_claim_vector = [8, 5, 9, 7] @@ -216,7 +214,6 @@ class BankersAlgorithm: "Initial Available Resources: " + " ".join(str(x) for x in self.__available_resources()) ) - time.sleep(1) if __name__ == "__main__": From 8adbf47c75e6881f8778fc4e9490628c71cc9fa1 Mon Sep 17 00:00:00 2001 From: Kishan Kumar Rai Date: Thu, 26 Oct 2023 18:21:28 +0530 Subject: [PATCH 241/306] Fix Typo & Grammatical Errors (#10980) --- CONTRIBUTING.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bf3420185..096582e45 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,20 +2,20 @@ ## Before contributing -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms/community). +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before submitting your pull requests, please ensure that you __read the whole guidelines__. 
If you have any doubts about the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community on [Gitter](https://gitter.im/TheAlgorithms/community). ## Contributing ### Contributor -We are very happy that you are considering implementing algorithms and data structures for others! This repository is referenced and used by learners from all over the globe. Being one of our contributors, you agree and confirm that: +We are delighted that you are considering implementing algorithms and data structures for others! This repository is referenced and used by learners from all over the globe. By being one of our contributors, you agree and confirm that: -- You did your work - no plagiarism allowed +- You did your work - no plagiarism allowed. - Any plagiarized work will not be merged. -- Your work will be distributed under [MIT License](LICENSE.md) once your pull request is merged -- Your submitted work fulfils or mostly fulfils our styles and standards +- Your work will be distributed under [MIT License](LICENSE.md) once your pull request is merged. +- Your submitted work fulfills or mostly fulfills our styles and standards. -__New implementation__ is welcome! For example, new solutions for a problem, different representations for a graph data structure or algorithm designs with different complexity but __identical implementation__ of an existing implementation is not allowed. Please check whether the solution is already implemented or not before submitting your pull request. +__New implementation__ is welcome! For example, new solutions for a problem, different representations for a graph data structure or algorithm designs with different complexity, but __identical implementation__ of an existing implementation is not allowed. Please check whether the solution is already implemented or not before submitting your pull request. 
__Improving comments__ and __writing proper tests__ are also highly welcome. @@ -23,7 +23,7 @@ __Improving comments__ and __writing proper tests__ are also highly welcome. We appreciate any contribution, from fixing a grammar mistake in a comment to implementing complex algorithms. Please read this section if you are contributing your work. -Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button try to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. +Your contribution will be tested by our [automated testing on GitHub Actions](https://github.com/TheAlgorithms/Python/actions) to save time and mental energy. After you have submitted your pull request, you should see the GitHub Actions tests start to run at the bottom of your submission page. If those tests fail, then click on the ___details___ button to read through the GitHub Actions output to understand the failure. If you do not understand, please leave a comment on your submission page and a community member will try to help. #### Issues @@ -58,7 +58,7 @@ Algorithms should: * contain doctests that test both valid and erroneous input values * return all calculation results instead of printing or plotting them -Algorithms in this repo should not be how-to examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values. Those calculations or manipulations can use data types, classes, or functions of existing Python packages but each algorithm in this repo should add unique value. 
+Algorithms in this repo should not be how-to examples for existing Python packages. Instead, they should perform internal calculations or manipulations to convert input values into different output values. Those calculations or manipulations can use data types, classes, or functions of existing Python packages but each algorithm in this repo should add unique value. #### Pre-commit plugin Use [pre-commit](https://pre-commit.com/#installation) to automatically format your code to match our coding style: @@ -77,7 +77,7 @@ pre-commit run --all-files --show-diff-on-failure We want your work to be readable by others; therefore, we encourage you to note the following: -- Please write in Python 3.12+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. +- Please write in Python 3.12+. For instance: `print()` is a function in Python 3 so `print "Hello"` will *not* work but `print("Hello")` will. - Please focus hard on the naming of functions, classes, and variables. Help your reader by using __descriptive names__ that can help you to remove redundant comments. - Single letter variable names are *old school* so please avoid them unless their life only spans a few lines. - Expand acronyms because `gcd()` is hard to understand but `greatest_common_divisor()` is not. 
@@ -145,7 +145,7 @@ We want your work to be readable by others; therefore, we encourage you to note python3 -m doctest -v my_submission.py ``` - The use of the Python builtin `input()` function is __not__ encouraged: + The use of the Python built-in `input()` function is __not__ encouraged: ```python input('Enter your input:') From 34b25c0c769b417e82bc32cd4d3a801637ee57ab Mon Sep 17 00:00:00 2001 From: Tiela Rose Black-Law <26930264+tielarose@users.noreply.github.com> Date: Thu, 26 Oct 2023 05:52:47 -0700 Subject: [PATCH 242/306] Add doctest for maths/primelib (#10978) --- maths/primelib.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/maths/primelib.py b/maths/primelib.py index e2d432e18..a26b0eaeb 100644 --- a/maths/primelib.py +++ b/maths/primelib.py @@ -454,6 +454,8 @@ def kg_v(number1, number2): 40 >>> kg_v(824,67) 55208 + >>> kg_v(1, 10) + 10 >>> kg_v(0) Traceback (most recent call last): ... From a8dfd403f6df2275272190a55edb6a739880f6a9 Mon Sep 17 00:00:00 2001 From: Ed Date: Thu, 26 Oct 2023 07:33:42 -0700 Subject: [PATCH 243/306] Add new algorithm index_2d_array_in_1d (#10973) * Add new algorithm index_2d_array_in_1d * Add doctest for iter function * The power of dataclasses * Update index_2d_array_in_1d.py --------- Co-authored-by: Christian Clauss --- .../arrays/index_2d_array_in_1d.py | 105 ++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 data_structures/arrays/index_2d_array_in_1d.py diff --git a/data_structures/arrays/index_2d_array_in_1d.py b/data_structures/arrays/index_2d_array_in_1d.py new file mode 100644 index 000000000..27a9fa5f9 --- /dev/null +++ b/data_structures/arrays/index_2d_array_in_1d.py @@ -0,0 +1,105 @@ +""" +Retrieves the value of an 0-indexed 1D index from a 2D array. +There are two ways to retrieve value(s): + +1. Index2DArrayIterator(matrix) -> Iterator[int] +This iterator allows you to iterate through a 2D array by passing in the matrix and +calling next(your_iterator). 
You can also use the iterator in a loop. +Examples: +list(Index2DArrayIterator(matrix)) +set(Index2DArrayIterator(matrix)) +tuple(Index2DArrayIterator(matrix)) +sum(Index2DArrayIterator(matrix)) +-5 in Index2DArrayIterator(matrix) + +2. index_2d_array_in_1d(array: list[int], index: int) -> int +This function allows you to provide a 2D array and a 0-indexed 1D integer index, +and retrieves the integer value at that index. + +Python doctests can be run using this command: +python3 -m doctest -v index_2d_array_in_1d.py +""" + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class Index2DArrayIterator: + matrix: list[list[int]] + + def __iter__(self) -> Iterator[int]: + """ + >>> tuple(Index2DArrayIterator([[5], [-523], [-1], [34], [0]])) + (5, -523, -1, 34, 0) + >>> tuple(Index2DArrayIterator([[5, -523, -1], [34, 0]])) + (5, -523, -1, 34, 0) + >>> tuple(Index2DArrayIterator([[5, -523, -1, 34, 0]])) + (5, -523, -1, 34, 0) + >>> t = Index2DArrayIterator([[5, 2, 25], [23, 14, 5], [324, -1, 0]]) + >>> tuple(t) + (5, 2, 25, 23, 14, 5, 324, -1, 0) + >>> list(t) + [5, 2, 25, 23, 14, 5, 324, -1, 0] + >>> sorted(t) + [-1, 0, 2, 5, 5, 14, 23, 25, 324] + >>> tuple(t)[3] + 23 + >>> sum(t) + 397 + >>> -1 in t + True + >>> t = iter(Index2DArrayIterator([[5], [-523], [-1], [34], [0]])) + >>> next(t) + 5 + >>> next(t) + -523 + """ + for row in self.matrix: + yield from row + + +def index_2d_array_in_1d(array: list[list[int]], index: int) -> int: + """ + Retrieves the value of the one-dimensional index from a two-dimensional array. + + Args: + array: A 2D array of integers where all rows are the same size and all + columns are the same size. + index: A 1D index. + + Returns: + int: The 0-indexed value of the 1D index in the array. + + Examples: + >>> index_2d_array_in_1d([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], 5) + 5 + >>> index_2d_array_in_1d([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], -1) + Traceback (most recent call last): + ... 
+ ValueError: index out of range + >>> index_2d_array_in_1d([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], 12) + Traceback (most recent call last): + ... + ValueError: index out of range + >>> index_2d_array_in_1d([[]], 0) + Traceback (most recent call last): + ... + ValueError: no items in array + """ + rows = len(array) + cols = len(array[0]) + + if rows == 0 or cols == 0: + raise ValueError("no items in array") + + if index < 0 or index >= rows * cols: + raise ValueError("index out of range") + + return array[index // cols][index % cols] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From fe4aad0ec94a2d2f28470dd8eaad3ff1bf74c5c8 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Thu, 26 Oct 2023 20:51:45 +0530 Subject: [PATCH 244/306] Added doctest & docstring to quadratic_probing.py (#10996) * Added doctest & docstring to quadratic_probing.py * Update quadratic_probing.py * Update quadratic_probing.py --- data_structures/hashing/quadratic_probing.py | 55 ++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/data_structures/hashing/quadratic_probing.py b/data_structures/hashing/quadratic_probing.py index 0930340a3..2f3401ec8 100644 --- a/data_structures/hashing/quadratic_probing.py +++ b/data_structures/hashing/quadratic_probing.py @@ -12,6 +12,55 @@ class QuadraticProbing(HashTable): super().__init__(*args, **kwargs) def _collision_resolution(self, key, data=None): + """ + Quadratic probing is an open addressing scheme used for resolving + collisions in hash table. + + It works by taking the original hash index and adding successive + values of an arbitrary quadratic polynomial until open slot is found. + + Hash + 1², Hash + 2², Hash + 3² .... Hash + n² + + reference: + - https://en.wikipedia.org/wiki/Quadratic_probing + e.g: + 1. 
Create hash table with size 7 + >>> qp = QuadraticProbing(7) + >>> qp.insert_data(90) + >>> qp.insert_data(340) + >>> qp.insert_data(24) + >>> qp.insert_data(45) + >>> qp.insert_data(99) + >>> qp.insert_data(73) + >>> qp.insert_data(7) + >>> qp.keys() + {11: 45, 14: 99, 7: 24, 0: 340, 5: 73, 6: 90, 8: 7} + + 2. Create hash table with size 8 + >>> qp = QuadraticProbing(8) + >>> qp.insert_data(0) + >>> qp.insert_data(999) + >>> qp.insert_data(111) + >>> qp.keys() + {0: 0, 7: 999, 3: 111} + + 3. Try to add three data elements when the size is two + >>> qp = QuadraticProbing(2) + >>> qp.insert_data(0) + >>> qp.insert_data(999) + >>> qp.insert_data(111) + >>> qp.keys() + {0: 0, 4: 999, 1: 111} + + 4. Try to add three data elements when the size is one + >>> qp = QuadraticProbing(1) + >>> qp.insert_data(0) + >>> qp.insert_data(999) + >>> qp.insert_data(111) + >>> qp.keys() + {4: 999, 1: 111} + """ + i = 1 new_key = self.hash_function(key + i * i) @@ -27,3 +76,9 @@ class QuadraticProbing(HashTable): break return new_key + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 5987f861926c7560cd46c1e33c3cc2c0506c0ee1 Mon Sep 17 00:00:00 2001 From: Poojan Smart <44301271+PoojanSmart@users.noreply.github.com> Date: Fri, 27 Oct 2023 14:17:24 +0530 Subject: [PATCH 245/306] Add automatic differentiation algorithm (#10977) * Added automatic differentiation algorithm * file name changed * Resolved pre commit errors * updated dependency * added noqa for ignoring check * adding typing_extension for adding Self type in __new__ * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * sorted requirement.text dependency * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolved ruff --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- machine_learning/automatic_differentiation.py | 327 ++++++++++++++++++ 
requirements.txt | 3 +- 2 files changed, 329 insertions(+), 1 deletion(-) create mode 100644 machine_learning/automatic_differentiation.py diff --git a/machine_learning/automatic_differentiation.py b/machine_learning/automatic_differentiation.py new file mode 100644 index 000000000..cd2e5cdaa --- /dev/null +++ b/machine_learning/automatic_differentiation.py @@ -0,0 +1,327 @@ +""" +Demonstration of the Automatic Differentiation (Reverse mode). + +Reference: https://en.wikipedia.org/wiki/Automatic_differentiation + +Author: Poojan Smart +Email: smrtpoojan@gmail.com +""" +from __future__ import annotations + +from collections import defaultdict +from enum import Enum +from types import TracebackType +from typing import Any + +import numpy as np +from typing_extensions import Self # noqa: UP035 + + +class OpType(Enum): + """ + Class represents list of supported operations on Variable for gradient calculation. + """ + + ADD = 0 + SUB = 1 + MUL = 2 + DIV = 3 + MATMUL = 4 + POWER = 5 + NOOP = 6 + + +class Variable: + """ + Class represents n-dimensional object which is used to wrap numpy array on which + operations will be performed and the gradient will be calculated. + + Examples: + >>> Variable(5.0) + Variable(5.0) + >>> Variable([5.0, 2.9]) + Variable([5. 2.9]) + >>> Variable([5.0, 2.9]) + Variable([1.0, 5.5]) + Variable([6. 8.4]) + >>> Variable([[8.0, 10.0]]) + Variable([[ 8. 
10.]]) + """ + + def __init__(self, value: Any) -> None: + self.value = np.array(value) + + # pointers to the operations to which the Variable is input + self.param_to: list[Operation] = [] + # pointer to the operation of which the Variable is output of + self.result_of: Operation = Operation(OpType.NOOP) + + def __repr__(self) -> str: + return f"Variable({self.value})" + + def to_ndarray(self) -> np.ndarray: + return self.value + + def __add__(self, other: Variable) -> Variable: + result = Variable(self.value + other.value) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append(OpType.ADD, params=[self, other], output=result) + return result + + def __sub__(self, other: Variable) -> Variable: + result = Variable(self.value - other.value) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append(OpType.SUB, params=[self, other], output=result) + return result + + def __mul__(self, other: Variable) -> Variable: + result = Variable(self.value * other.value) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append(OpType.MUL, params=[self, other], output=result) + return result + + def __truediv__(self, other: Variable) -> Variable: + result = Variable(self.value / other.value) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append(OpType.DIV, params=[self, other], output=result) + return result + + def __matmul__(self, other: Variable) -> Variable: + result = Variable(self.value @ other.value) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append(OpType.MATMUL, params=[self, other], output=result) + return result + + def __pow__(self, power: int) -> Variable: 
+ result = Variable(self.value**power) + + with GradientTracker() as tracker: + # if tracker is enabled, computation graph will be updated + if tracker.enabled: + tracker.append( + OpType.POWER, + params=[self], + output=result, + other_params={"power": power}, + ) + return result + + def add_param_to(self, param_to: Operation) -> None: + self.param_to.append(param_to) + + def add_result_of(self, result_of: Operation) -> None: + self.result_of = result_of + + +class Operation: + """ + Class represents operation between single or two Variable objects. + Operation objects contains type of operation, pointers to input Variable + objects and pointer to resulting Variable from the operation. + """ + + def __init__( + self, + op_type: OpType, + other_params: dict | None = None, + ) -> None: + self.op_type = op_type + self.other_params = {} if other_params is None else other_params + + def add_params(self, params: list[Variable]) -> None: + self.params = params + + def add_output(self, output: Variable) -> None: + self.output = output + + def __eq__(self, value) -> bool: + return self.op_type == value if isinstance(value, OpType) else False + + +class GradientTracker: + """ + Class contains methods to compute partial derivatives of Variable + based on the computation graph. + + Examples: + + >>> with GradientTracker() as tracker: + ... a = Variable([2.0, 5.0]) + ... b = Variable([1.0, 2.0]) + ... m = Variable([1.0, 2.0]) + ... c = a + b + ... d = a * b + ... e = c / d + >>> tracker.gradient(e, a) + array([-0.25, -0.04]) + >>> tracker.gradient(e, b) + array([-1. , -0.25]) + >>> tracker.gradient(e, m) is None + True + + >>> with GradientTracker() as tracker: + ... a = Variable([[2.0, 5.0]]) + ... b = Variable([[1.0], [2.0]]) + ... c = a @ b + >>> tracker.gradient(c, a) + array([[1., 2.]]) + >>> tracker.gradient(c, b) + array([[2.], + [5.]]) + + >>> with GradientTracker() as tracker: + ... a = Variable([[2.0, 5.0]]) + ... 
b = a ** 3 + >>> tracker.gradient(b, a) + array([[12., 75.]]) + """ + + instance = None + + def __new__(cls) -> Self: + """ + Executes at the creation of class object and returns if + object is already created. This class follows singleton + design pattern. + """ + if cls.instance is None: + cls.instance = super().__new__(cls) + return cls.instance + + def __init__(self) -> None: + self.enabled = False + + def __enter__(self) -> Self: + self.enabled = True + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.enabled = False + + def append( + self, + op_type: OpType, + params: list[Variable], + output: Variable, + other_params: dict | None = None, + ) -> None: + """ + Adds Operation object to the related Variable objects for + creating computational graph for calculating gradients. + + Args: + op_type: Operation type + params: Input parameters to the operation + output: Output variable of the operation + """ + operation = Operation(op_type, other_params=other_params) + param_nodes = [] + for param in params: + param.add_param_to(operation) + param_nodes.append(param) + output.add_result_of(operation) + + operation.add_params(param_nodes) + operation.add_output(output) + + def gradient(self, target: Variable, source: Variable) -> np.ndarray | None: + """ + Reverse accumulation of partial derivatives to calculate gradients + of target variable with respect to source variable. + + Args: + target: target variable for which gradients are calculated. + source: source variable with respect to which the gradients are + calculated. 
+ + Returns: + Gradient of the source variable with respect to the target variable + """ + + # partial derivatives with respect to target + partial_deriv = defaultdict(lambda: 0) + partial_deriv[target] = np.ones_like(target.to_ndarray()) + + # iterating through each operations in the computation graph + operation_queue = [target.result_of] + while len(operation_queue) > 0: + operation = operation_queue.pop() + for param in operation.params: + # as per the chain rule, multiplying partial derivatives + # of variables with respect to the target + dparam_doutput = self.derivative(param, operation) + dparam_dtarget = dparam_doutput * partial_deriv[operation.output] + partial_deriv[param] += dparam_dtarget + + if param.result_of and param.result_of != OpType.NOOP: + operation_queue.append(param.result_of) + + return partial_deriv.get(source) + + def derivative(self, param: Variable, operation: Operation) -> np.ndarray: + """ + Compute the derivative of given operation/function + + Args: + param: variable to be differentiated + operation: function performed on the input variable + + Returns: + Derivative of input variable with respect to the output of + the operation + """ + params = operation.params + + if operation == OpType.ADD: + return np.ones_like(params[0].to_ndarray(), dtype=np.float64) + if operation == OpType.SUB: + if params[0] == param: + return np.ones_like(params[0].to_ndarray(), dtype=np.float64) + return -np.ones_like(params[1].to_ndarray(), dtype=np.float64) + if operation == OpType.MUL: + return ( + params[1].to_ndarray().T + if params[0] == param + else params[0].to_ndarray().T + ) + if operation == OpType.DIV: + if params[0] == param: + return 1 / params[1].to_ndarray() + return -params[0].to_ndarray() / (params[1].to_ndarray() ** 2) + if operation == OpType.MATMUL: + return ( + params[1].to_ndarray().T + if params[0] == param + else params[0].to_ndarray().T + ) + if operation == OpType.POWER: + power = operation.other_params["power"] + return power * 
(params[0].to_ndarray() ** (power - 1)) + + err_msg = f"invalid operation type: {operation.op_type}" + raise ValueError(err_msg) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/requirements.txt b/requirements.txt index 05d9f1e8c..8937f6bb0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,5 +19,6 @@ statsmodels sympy tensorflow ; python_version < '3.12' tweepy -xgboost # yulewalker # uncomment once audio_filters/equal_loudness_filter.py is fixed +typing_extensions +xgboost From 34eb9c529a74c3f3d1b878a1c7ca2529686b41f8 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Fri, 27 Oct 2023 18:06:43 +0530 Subject: [PATCH 246/306] Added doctest to hash_table.py (#11023) * Added doctest to hash_table.py * Update hash_table.py * Update hash_table.py * Update hash_table.py * Update hash_table.py * Apply suggestions from code review * Update hash_table.py --------- Co-authored-by: Christian Clauss --- data_structures/hashing/hash_table.py | 113 ++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) diff --git a/data_structures/hashing/hash_table.py b/data_structures/hashing/hash_table.py index 5bf431328..7fe57068f 100644 --- a/data_structures/hashing/hash_table.py +++ b/data_structures/hashing/hash_table.py @@ -52,6 +52,30 @@ class HashTable: ) def hash_function(self, key): + """ + Generates hash for the given key value + + Examples: + + Creating HashTable with size 5 + >>> ht = HashTable(5) + >>> ht.hash_function(10) + 0 + >>> ht.hash_function(20) + 0 + >>> ht.hash_function(4) + 4 + >>> ht.hash_function(18) + 3 + >>> ht.hash_function(-18) + 2 + >>> ht.hash_function(18.5) + 3.5 + >>> ht.hash_function(0) + 0 + >>> ht.hash_function(-0) + 0 + """ return key % self.size_table def _step_by_step(self, step_ord): @@ -105,10 +129,99 @@ class HashTable: i += 1 def _set_value(self, key, data): + """ + _set_value functions allows to update value at a particular hash + + Examples: + 1. 
_set_value in HashTable of size 5 + >>> ht = HashTable(5) + >>> ht.insert_data(10) + >>> ht.insert_data(20) + >>> ht.insert_data(30) + >>> ht._set_value(0,15) + >>> ht.keys() + {0: 15, 1: 20, 2: 30} + + 2. _set_value in HashTable of size 2 + >>> ht = HashTable(2) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht._set_value(3,15) + >>> ht.keys() + {3: 15, 2: 17, 4: 99} + + 3. _set_value in HashTable when hash is not present + >>> ht = HashTable(2) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht._set_value(0,15) + >>> ht.keys() + {3: 18, 2: 17, 4: 99, 0: 15} + + 4. _set_value in HashTable when multiple hash are not present + >>> ht = HashTable(2) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht._set_value(0,15) + >>> ht._set_value(1,20) + >>> ht.keys() + {3: 18, 2: 17, 4: 99, 0: 15, 1: 20} + """ self.values[key] = data self._keys[key] = data def _collision_resolution(self, key, data=None): + """ + This method is a type of open addressing which is used for handling collision. + + In this implementation the concept of linear probing has been used. + + The hash table is searched sequentially from the original location of the + hash, if the new hash/location we get is already occupied we check for the next + hash/location. + + references: + - https://en.wikipedia.org/wiki/Linear_probing + + Examples: + 1. The collision will be with keys 18 & 99, so new hash will be created for 99 + >>> ht = HashTable(3) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht.keys() + {2: 17, 0: 18, 1: 99} + + 2. The collision will be with keys 17 & 101, so new hash + will be created for 101 + >>> ht = HashTable(4) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht.insert_data(101) + >>> ht.keys() + {1: 17, 2: 18, 3: 99, 0: 101} + + 2. 
The collision will be with all keys, so new hash will be created for all + >>> ht = HashTable(1) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99) + >>> ht.keys() + {2: 17, 3: 18, 4: 99} + + 3. Trying to insert float key in hash + >>> ht = HashTable(1) + >>> ht.insert_data(17) + >>> ht.insert_data(18) + >>> ht.insert_data(99.99) + Traceback (most recent call last): + ... + TypeError: list indices must be integers or slices, not float + """ new_key = self.hash_function(key + 1) while self.values[new_key] is not None and self.values[new_key] != key: From e4eda145833565443be2e5ed4c805fbaaa9d964e Mon Sep 17 00:00:00 2001 From: Poojan Smart <44301271+PoojanSmart@users.noreply.github.com> Date: Fri, 27 Oct 2023 20:14:33 +0530 Subject: [PATCH 247/306] Add perplexity loss algorithm (#11028) --- machine_learning/loss_functions.py | 92 ++++++++++++++++++++++++++++++ 1 file changed, 92 insertions(+) diff --git a/machine_learning/loss_functions.py b/machine_learning/loss_functions.py index ea1f390e3..36a760326 100644 --- a/machine_learning/loss_functions.py +++ b/machine_learning/loss_functions.py @@ -379,6 +379,98 @@ def mean_absolute_percentage_error( return np.mean(absolute_percentage_diff) +def perplexity_loss( + y_true: np.ndarray, y_pred: np.ndarray, epsilon: float = 1e-7 +) -> float: + """ + Calculate the perplexity for the y_true and y_pred. + + Compute the Perplexity which useful in predicting language model + accuracy in Natural Language Processing (NLP.) + Perplexity is measure of how certain the model in its predictions. + + Perplexity Loss = exp(-1/N (Σ ln(p(x))) + + Reference: + https://en.wikipedia.org/wiki/Perplexity + + Args: + y_true: Actual label encoded sentences of shape (batch_size, sentence_length) + y_pred: Predicted sentences of shape (batch_size, sentence_length, vocab_size) + epsilon: Small floating point number to avoid getting inf for log(0) + + Returns: + Perplexity loss between y_true and y_pred. 
+ + >>> y_true = np.array([[1, 4], [2, 3]]) + >>> y_pred = np.array( + ... [[[0.28, 0.19, 0.21 , 0.15, 0.15], + ... [0.24, 0.19, 0.09, 0.18, 0.27]], + ... [[0.03, 0.26, 0.21, 0.18, 0.30], + ... [0.28, 0.10, 0.33, 0.15, 0.12]]] + ... ) + >>> perplexity_loss(y_true, y_pred) + 5.0247347775367945 + >>> y_true = np.array([[1, 4], [2, 3]]) + >>> y_pred = np.array( + ... [[[0.28, 0.19, 0.21 , 0.15, 0.15], + ... [0.24, 0.19, 0.09, 0.18, 0.27], + ... [0.30, 0.10, 0.20, 0.15, 0.25]], + ... [[0.03, 0.26, 0.21, 0.18, 0.30], + ... [0.28, 0.10, 0.33, 0.15, 0.12], + ... [0.30, 0.10, 0.20, 0.15, 0.25]],] + ... ) + >>> perplexity_loss(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: Sentence length of y_true and y_pred must be equal. + >>> y_true = np.array([[1, 4], [2, 11]]) + >>> y_pred = np.array( + ... [[[0.28, 0.19, 0.21 , 0.15, 0.15], + ... [0.24, 0.19, 0.09, 0.18, 0.27]], + ... [[0.03, 0.26, 0.21, 0.18, 0.30], + ... [0.28, 0.10, 0.33, 0.15, 0.12]]] + ... ) + >>> perplexity_loss(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: Label value must not be greater than vocabulary size. + >>> y_true = np.array([[1, 4]]) + >>> y_pred = np.array( + ... [[[0.28, 0.19, 0.21 , 0.15, 0.15], + ... [0.24, 0.19, 0.09, 0.18, 0.27]], + ... [[0.03, 0.26, 0.21, 0.18, 0.30], + ... [0.28, 0.10, 0.33, 0.15, 0.12]]] + ... ) + >>> perplexity_loss(y_true, y_pred) + Traceback (most recent call last): + ... + ValueError: Batch size of y_true and y_pred must be equal. 
+ """ + + vocab_size = y_pred.shape[2] + + if y_true.shape[0] != y_pred.shape[0]: + raise ValueError("Batch size of y_true and y_pred must be equal.") + if y_true.shape[1] != y_pred.shape[1]: + raise ValueError("Sentence length of y_true and y_pred must be equal.") + if np.max(y_true) > vocab_size: + raise ValueError("Label value must not be greater than vocabulary size.") + + # Matrix to select prediction value only for true class + filter_matrix = np.array( + [[list(np.eye(vocab_size)[word]) for word in sentence] for sentence in y_true] + ) + + # Getting the matrix containing prediction for only true class + true_class_pred = np.sum(y_pred * filter_matrix, axis=2).clip(epsilon, 1) + + # Calculating perplexity for each sentence + perp_losses = np.exp(np.negative(np.mean(np.log(true_class_pred), axis=1))) + + return np.mean(perp_losses) + + if __name__ == "__main__": import doctest From f336cca8f8b2989d612068845f147ce885676148 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Fri, 27 Oct 2023 22:10:42 +0530 Subject: [PATCH 248/306] Added doctest to double_hash.py (#11020) * Added doctest to double_hash.py * Update double_hash.py --- data_structures/hashing/double_hash.py | 33 ++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/data_structures/hashing/double_hash.py b/data_structures/hashing/double_hash.py index be21e74ca..76c6c8681 100644 --- a/data_structures/hashing/double_hash.py +++ b/data_structures/hashing/double_hash.py @@ -35,6 +35,33 @@ class DoubleHash(HashTable): return (increment * self.__hash_function_2(key, data)) % self.size_table def _collision_resolution(self, key, data=None): + """ + Examples: + + 1. Try to add three data elements when the size is three + >>> dh = DoubleHash(3) + >>> dh.insert_data(10) + >>> dh.insert_data(20) + >>> dh.insert_data(30) + >>> dh.keys() + {1: 10, 2: 20, 0: 30} + + 2. 
Try to add three data elements when the size is two + >>> dh = DoubleHash(2) + >>> dh.insert_data(10) + >>> dh.insert_data(20) + >>> dh.insert_data(30) + >>> dh.keys() + {10: 10, 9: 20, 8: 30} + + 3. Try to add three data elements when the size is four + >>> dh = DoubleHash(4) + >>> dh.insert_data(10) + >>> dh.insert_data(20) + >>> dh.insert_data(30) + >>> dh.keys() + {9: 20, 10: 10, 8: 30} + """ i = 1 new_key = self.hash_function(data) @@ -50,3 +77,9 @@ class DoubleHash(HashTable): i += 1 return new_key + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 185a35589ab14bf27f23266a25d8e1bcced646b2 Mon Sep 17 00:00:00 2001 From: Khushi Shukla Date: Fri, 27 Oct 2023 22:12:34 +0530 Subject: [PATCH 249/306] Create monotonic_array.py (#11025) * Create monotonic_array.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update monotonic_array.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/arrays/monotonic_array.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 data_structures/arrays/monotonic_array.py diff --git a/data_structures/arrays/monotonic_array.py b/data_structures/arrays/monotonic_array.py new file mode 100644 index 000000000..c50a21530 --- /dev/null +++ b/data_structures/arrays/monotonic_array.py @@ -0,0 +1,23 @@ +# https://leetcode.com/problems/monotonic-array/ +def is_monotonic(nums: list[int]) -> bool: + """ + Check if a list is monotonic. 
+ + >>> is_monotonic([1, 2, 2, 3]) + True + >>> is_monotonic([6, 5, 4, 4]) + True + >>> is_monotonic([1, 3, 2]) + False + """ + return all(nums[i] <= nums[i + 1] for i in range(len(nums) - 1)) or all( + nums[i] >= nums[i + 1] for i in range(len(nums) - 1) + ) + + +# Test the function with your examples +if __name__ == "__main__": + # Test the function with your examples + print(is_monotonic([1, 2, 2, 3])) # Output: True + print(is_monotonic([6, 5, 4, 4])) # Output: True + print(is_monotonic([1, 3, 2])) # Output: False From b0837d39859452ed7bd6e5b7adbdf172f70228bf Mon Sep 17 00:00:00 2001 From: Adam Ross <14985050+R055A@users.noreply.github.com> Date: Fri, 27 Oct 2023 22:10:32 +0200 Subject: [PATCH 250/306] Increase code coverage for dijkstra algorithm (#10695) * Increase code coverage for dijkstra algorithm * Add missing code coverage Refactor to pass mypy * Fix missing code coverage * Remove code changes, keep doctest * Remove ALL of the code changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dijkstra_algorithm.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- graphs/dijkstra_algorithm.py | 313 +++++++++++++++++++++++++++++++++-- 1 file changed, 299 insertions(+), 14 deletions(-) diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py index 452138fe9..2efa2cb63 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -11,35 +11,127 @@ import sys class PriorityQueue: # Based on Min Heap def __init__(self): + """ + Priority queue class constructor method. 
+ + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.cur_size + 0 + >>> priority_queue_test.array + [] + >>> priority_queue_test.pos + {} + """ self.cur_size = 0 self.array = [] self.pos = {} # To store the pos of node in array def is_empty(self): + """ + Conditional boolean method to determine if the priority queue is empty or not. + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.is_empty() + True + >>> priority_queue_test.insert((2, 'A')) + >>> priority_queue_test.is_empty() + False + """ return self.cur_size == 0 def min_heapify(self, idx): + """ + Sorts the queue array so that the minimum element is root. + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.cur_size = 3 + >>> priority_queue_test.pos = {'A': 0, 'B': 1, 'C': 2} + + >>> priority_queue_test.array = [(5, 'A'), (10, 'B'), (15, 'C')] + >>> priority_queue_test.min_heapify(0) + Traceback (most recent call last): + ... + TypeError: 'list' object is not callable + >>> priority_queue_test.array + [(5, 'A'), (10, 'B'), (15, 'C')] + + >>> priority_queue_test.array = [(10, 'A'), (5, 'B'), (15, 'C')] + >>> priority_queue_test.min_heapify(0) + Traceback (most recent call last): + ... + TypeError: 'list' object is not callable + >>> priority_queue_test.array + [(10, 'A'), (5, 'B'), (15, 'C')] + + >>> priority_queue_test.array = [(10, 'A'), (15, 'B'), (5, 'C')] + >>> priority_queue_test.min_heapify(0) + Traceback (most recent call last): + ... + TypeError: 'list' object is not callable + >>> priority_queue_test.array + [(10, 'A'), (15, 'B'), (5, 'C')] + + >>> priority_queue_test.array = [(10, 'A'), (5, 'B')] + >>> priority_queue_test.cur_size = len(priority_queue_test.array) + >>> priority_queue_test.pos = {'A': 0, 'B': 1} + >>> priority_queue_test.min_heapify(0) + Traceback (most recent call last): + ... 
+ TypeError: 'list' object is not callable + >>> priority_queue_test.array + [(10, 'A'), (5, 'B')] + """ lc = self.left(idx) rc = self.right(idx) - if lc < self.cur_size and self.array(lc)[0] < self.array(idx)[0]: + if lc < self.cur_size and self.array(lc)[0] < self.array[idx][0]: smallest = lc else: smallest = idx - if rc < self.cur_size and self.array(rc)[0] < self.array(smallest)[0]: + if rc < self.cur_size and self.array(rc)[0] < self.array[smallest][0]: smallest = rc if smallest != idx: self.swap(idx, smallest) self.min_heapify(smallest) def insert(self, tup): - # Inserts a node into the Priority Queue + """ + Inserts a node into the Priority Queue. + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.insert((10, 'A')) + >>> priority_queue_test.array + [(10, 'A')] + >>> priority_queue_test.insert((15, 'B')) + >>> priority_queue_test.array + [(10, 'A'), (15, 'B')] + >>> priority_queue_test.insert((5, 'C')) + >>> priority_queue_test.array + [(5, 'C'), (10, 'A'), (15, 'B')] + """ self.pos[tup[1]] = self.cur_size self.cur_size += 1 self.array.append((sys.maxsize, tup[1])) self.decrease_key((sys.maxsize, tup[1]), tup[0]) def extract_min(self): - # Removes and returns the min element at top of priority queue + """ + Removes and returns the min element at top of priority queue. 
+ + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.array = [(10, 'A'), (15, 'B')] + >>> priority_queue_test.cur_size = len(priority_queue_test.array) + >>> priority_queue_test.pos = {'A': 0, 'B': 1} + >>> priority_queue_test.insert((5, 'C')) + >>> priority_queue_test.extract_min() + 'C' + >>> priority_queue_test.array[0] + (15, 'B') + """ min_node = self.array[0][1] self.array[0] = self.array[self.cur_size - 1] self.cur_size -= 1 @@ -48,20 +140,61 @@ class PriorityQueue: return min_node def left(self, i): - # returns the index of left child + """ + Returns the index of left child + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.left(0) + 1 + >>> priority_queue_test.left(1) + 3 + """ return 2 * i + 1 def right(self, i): - # returns the index of right child + """ + Returns the index of right child + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.right(0) + 2 + >>> priority_queue_test.right(1) + 4 + """ return 2 * i + 2 def par(self, i): - # returns the index of parent + """ + Returns the index of parent + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.par(1) + 0 + >>> priority_queue_test.par(2) + 1 + >>> priority_queue_test.par(4) + 2 + """ return math.floor(i / 2) def swap(self, i, j): - # swaps array elements at indices i and j - # update the pos{} + """ + Swaps array elements at indices i and j, update the pos{} + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.array = [(10, 'A'), (15, 'B')] + >>> priority_queue_test.cur_size = len(priority_queue_test.array) + >>> priority_queue_test.pos = {'A': 0, 'B': 1} + >>> priority_queue_test.swap(0, 1) + >>> priority_queue_test.array + [(15, 'B'), (10, 'A')] + >>> priority_queue_test.pos + {'A': 1, 'B': 0} + """ self.pos[self.array[i][1]] = j self.pos[self.array[j][1]] = i temp = self.array[i] @@ -69,6 +202,18 @@ class PriorityQueue: self.array[j] = temp 
def decrease_key(self, tup, new_d): + """ + Decrease the key value for a given tuple, assuming the new_d is at most old_d. + + Examples: + >>> priority_queue_test = PriorityQueue() + >>> priority_queue_test.array = [(10, 'A'), (15, 'B')] + >>> priority_queue_test.cur_size = len(priority_queue_test.array) + >>> priority_queue_test.pos = {'A': 0, 'B': 1} + >>> priority_queue_test.decrease_key((10, 'A'), 5) + >>> priority_queue_test.array + [(5, 'A'), (15, 'B')] + """ idx = self.pos[tup[1]] # assuming the new_d is atmost old_d self.array[idx] = (new_d, tup[1]) @@ -79,6 +224,20 @@ class PriorityQueue: class Graph: def __init__(self, num): + """ + Graph class constructor + + Examples: + >>> graph_test = Graph(1) + >>> graph_test.num_nodes + 1 + >>> graph_test.dist + [0] + >>> graph_test.par + [-1] + >>> graph_test.adjList + {} + """ self.adjList = {} # To store graph: u -> (v,w) self.num_nodes = num # Number of nodes in graph # To store the distance from source vertex @@ -86,8 +245,16 @@ class Graph: self.par = [-1] * self.num_nodes # To store the path def add_edge(self, u, v, w): - # Edge going from node u to v and v to u with weight w - # u (w)-> v, v (w) -> u + """ + Add edge going from node u to v and v to u with weight w: u (w)-> v, v (w) -> u + + Examples: + >>> graph_test = Graph(1) + >>> graph_test.add_edge(1, 2, 1) + >>> graph_test.add_edge(2, 3, 2) + >>> graph_test.adjList + {1: [(2, 1)], 2: [(1, 1), (3, 2)], 3: [(2, 2)]} + """ # Check if u already in graph if u in self.adjList: self.adjList[u].append((v, w)) @@ -101,11 +268,99 @@ class Graph: self.adjList[v] = [(u, w)] def show_graph(self): - # u -> v(w) + """ + Show the graph: u -> v(w) + + Examples: + >>> graph_test = Graph(1) + >>> graph_test.add_edge(1, 2, 1) + >>> graph_test.show_graph() + 1 -> 2(1) + 2 -> 1(1) + >>> graph_test.add_edge(2, 3, 2) + >>> graph_test.show_graph() + 1 -> 2(1) + 2 -> 1(1) -> 3(2) + 3 -> 2(2) + """ for u in self.adjList: print(u, "->", " -> ".join(str(f"{v}({w})") for v, w in 
self.adjList[u])) def dijkstra(self, src): + """ + Dijkstra algorithm + + Examples: + >>> graph_test = Graph(3) + >>> graph_test.add_edge(0, 1, 2) + >>> graph_test.add_edge(1, 2, 2) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 2 + Node 2 has distance: 4 + >>> graph_test.dist + [0, 2, 4] + + >>> graph_test = Graph(2) + >>> graph_test.add_edge(0, 1, 2) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 2 + >>> graph_test.dist + [0, 2] + + >>> graph_test = Graph(3) + >>> graph_test.add_edge(0, 1, 2) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 2 + Node 2 has distance: 0 + >>> graph_test.dist + [0, 2, 0] + + >>> graph_test = Graph(3) + >>> graph_test.add_edge(0, 1, 2) + >>> graph_test.add_edge(1, 2, 2) + >>> graph_test.add_edge(0, 2, 1) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 2 + Node 2 has distance: 1 + >>> graph_test.dist + [0, 2, 1] + + >>> graph_test = Graph(4) + >>> graph_test.add_edge(0, 1, 4) + >>> graph_test.add_edge(1, 2, 2) + >>> graph_test.add_edge(2, 3, 1) + >>> graph_test.add_edge(0, 2, 3) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 4 + Node 2 has distance: 3 + Node 3 has distance: 4 + >>> graph_test.dist + [0, 4, 3, 4] + + >>> graph_test = Graph(4) + >>> graph_test.add_edge(0, 1, 4) + >>> graph_test.add_edge(1, 2, 2) + >>> graph_test.add_edge(2, 3, 1) + >>> graph_test.add_edge(0, 2, 7) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 4 + Node 2 has distance: 6 + Node 3 has distance: 7 + >>> graph_test.dist + [0, 4, 6, 7] + """ # Flush old junk values in par[] self.par = [-1] * self.num_nodes # src is the source node @@ -135,13 +390,40 @@ class Graph: self.show_distances(src) def show_distances(self, src): + """ + Show the 
distances from src to all other nodes in a graph + + Examples: + >>> graph_test = Graph(1) + >>> graph_test.show_distances(0) + Distance from node: 0 + Node 0 has distance: 0 + """ print(f"Distance from node: {src}") for u in range(self.num_nodes): print(f"Node {u} has distance: {self.dist[u]}") def show_path(self, src, dest): - # To show the shortest path from src to dest - # WARNING: Use it *after* calling dijkstra + """ + Shows the shortest path from src to dest. + WARNING: Use it *after* calling dijkstra. + + Examples: + >>> graph_test = Graph(4) + >>> graph_test.add_edge(0, 1, 1) + >>> graph_test.add_edge(1, 2, 2) + >>> graph_test.add_edge(2, 3, 3) + >>> graph_test.dijkstra(0) + Distance from node: 0 + Node 0 has distance: 0 + Node 1 has distance: 1 + Node 2 has distance: 3 + Node 3 has distance: 6 + >>> graph_test.show_path(0, 3) # doctest: +NORMALIZE_WHITESPACE + ----Path to reach 3 from 0---- + 0 -> 1 -> 2 -> 3 + Total cost of path: 6 + """ path = [] cost = 0 temp = dest @@ -167,6 +449,9 @@ class Graph: if __name__ == "__main__": + from doctest import testmod + + testmod() graph = Graph(9) graph.add_edge(0, 1, 4) graph.add_edge(0, 7, 8) From 0eb1825af2114c60792dc5cbd43ca1259ae95a24 Mon Sep 17 00:00:00 2001 From: RaymondDashWu <33266041+RaymondDashWu@users.noreply.github.com> Date: Fri, 27 Oct 2023 13:13:32 -0700 Subject: [PATCH 251/306] Tests for odd_even_transposition_parallel (#10926) * [ADD] tests for odd_even_transposition_parallel * adding another test because build failed 6 hrs * comment out all tests to see if it fails * list(range(10)[::-1]) test uncommented * [a, x, c] test uncommented * [1.9, 42.0, 2.8] test uncommented * [False, True, False] test uncommented * [1, 32.0, 9] test uncommented * [1, 32.0, 9] test uncommented * [-442, -98, -554, 266, -491, 985, -53, -529, 82, -429] test uncommented * test non global lock * [DEL] Testing multiple data types. 
Couldn't get doctest to work * [ADD] Comment on why non global process lock --- sorts/odd_even_transposition_parallel.py | 25 ++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/sorts/odd_even_transposition_parallel.py b/sorts/odd_even_transposition_parallel.py index 9e0d228bd..b8ab46df1 100644 --- a/sorts/odd_even_transposition_parallel.py +++ b/sorts/odd_even_transposition_parallel.py @@ -13,7 +13,8 @@ synchronization could be used. from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time -process_lock = Lock() +# NOTE This breaks testing on build runner. May work better locally +# process_lock = Lock() """ The function run by the processes that sorts the list @@ -28,7 +29,7 @@ resultPipe = the pipe used to send results back to main def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe): - global process_lock + process_lock = Lock() # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to @@ -72,6 +73,26 @@ arr = the list to be sorted def odd_even_transposition(arr): + """ + >>> odd_even_transposition(list(range(10)[::-1])) == sorted(list(range(10)[::-1])) + True + >>> odd_even_transposition(["a", "x", "c"]) == sorted(["x", "a", "c"]) + True + >>> odd_even_transposition([1.9, 42.0, 2.8]) == sorted([1.9, 42.0, 2.8]) + True + >>> odd_even_transposition([False, True, False]) == sorted([False, False, True]) + True + >>> odd_even_transposition([1, 32.0, 9]) == sorted([False, False, True]) + False + >>> odd_even_transposition([1, 32.0, 9]) == sorted([1.0, 32, 9.0]) + True + >>> unsorted_list = [-442, -98, -554, 266, -491, 985, -53, -529, 82, -429] + >>> odd_even_transposition(unsorted_list) == sorted(unsorted_list) + True + >>> unsorted_list = [-442, -98, -554, 266, -491, 985, -53, -529, 82, -429] + >>> odd_even_transposition(unsorted_list) == sorted(unsorted_list + 
[1]) + False + """ process_array_ = [] result_pipe = [] # initialize the list of pipes where the values will be retrieved From 5df16f11eb536f76b74d468de33114f25c2c9ac1 Mon Sep 17 00:00:00 2001 From: Tiela Rose Black-Law <26930264+tielarose@users.noreply.github.com> Date: Fri, 27 Oct 2023 14:13:51 -0700 Subject: [PATCH 252/306] Add doctest to hashes/hamming_code.py (#10961) --- hashes/hamming_code.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/hashes/hamming_code.py b/hashes/hamming_code.py index 8498ca920..4a6efcf23 100644 --- a/hashes/hamming_code.py +++ b/hashes/hamming_code.py @@ -77,6 +77,10 @@ def emitter_converter(size_par, data): >>> emitter_converter(4, "101010111111") ['1', '1', '1', '1', '0', '1', '0', '0', '1', '0', '1', '1', '1', '1', '1', '1'] + >>> emitter_converter(5, "101010111111") + Traceback (most recent call last): + ... + ValueError: size of parity don't match with size of data """ if size_par + len(data) <= 2**size_par - (len(data) - 1): raise ValueError("size of parity don't match with size of data") From a0e80a74c817c8edd35737d2fbf7d38dd71fa43d Mon Sep 17 00:00:00 2001 From: Sanket Nikam <77570082+SannketNikam@users.noreply.github.com> Date: Sat, 28 Oct 2023 02:47:58 +0530 Subject: [PATCH 253/306] Added Gradient Boosting Classifier (#10944) * Added Gradient Boosting Classifier * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gradient_boosting_classifier.py * Update gradient_boosting_classifier.py * Update gradient_boosting_classifier.py * Update gradient_boosting_classifier.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../gradient_boosting_classifier.py | 118 ++++++++++++++++++ 1 file changed, 118 insertions(+) create mode 100644 machine_learning/gradient_boosting_classifier.py diff --git 
a/machine_learning/gradient_boosting_classifier.py b/machine_learning/gradient_boosting_classifier.py new file mode 100644 index 000000000..2902394d8 --- /dev/null +++ b/machine_learning/gradient_boosting_classifier.py @@ -0,0 +1,118 @@ +import numpy as np +from sklearn.datasets import load_iris +from sklearn.metrics import accuracy_score +from sklearn.model_selection import train_test_split +from sklearn.tree import DecisionTreeRegressor + + +class GradientBoostingClassifier: + def __init__(self, n_estimators: int = 100, learning_rate: float = 0.1) -> None: + """ + Initialize a GradientBoostingClassifier. + + Parameters: + - n_estimators (int): The number of weak learners to train. + - learning_rate (float): The learning rate for updating the model. + + Attributes: + - n_estimators (int): The number of weak learners. + - learning_rate (float): The learning rate. + - models (list): A list to store the trained weak learners. + """ + self.n_estimators = n_estimators + self.learning_rate = learning_rate + self.models: list[tuple[DecisionTreeRegressor, float]] = [] + + def fit(self, features: np.ndarray, target: np.ndarray) -> None: + """ + Fit the GradientBoostingClassifier to the training data. + + Parameters: + - features (np.ndarray): The training features. + - target (np.ndarray): The target values. 
+ + Returns: + None + + >>> import numpy as np + >>> from sklearn.datasets import load_iris + >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1) + >>> iris = load_iris() + >>> X, y = iris.data, iris.target + >>> clf.fit(X, y) + >>> # Check if the model is trained + >>> len(clf.models) == 100 + True + """ + for _ in range(self.n_estimators): + # Calculate the pseudo-residuals + residuals = -self.gradient(target, self.predict(features)) + # Fit a weak learner (e.g., decision tree) to the residuals + model = DecisionTreeRegressor(max_depth=1) + model.fit(features, residuals) + # Update the model by adding the weak learner with a learning rate + self.models.append((model, self.learning_rate)) + + def predict(self, features: np.ndarray) -> np.ndarray: + """ + Make predictions on input data. + + Parameters: + - features (np.ndarray): The input data for making predictions. + + Returns: + - np.ndarray: An array of binary predictions (-1 or 1). + + >>> import numpy as np + >>> from sklearn.datasets import load_iris + >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1) + >>> iris = load_iris() + >>> X, y = iris.data, iris.target + >>> clf.fit(X, y) + >>> y_pred = clf.predict(X) + >>> # Check if the predictions have the correct shape + >>> y_pred.shape == y.shape + True + """ + # Initialize predictions with zeros + predictions = np.zeros(features.shape[0]) + for model, learning_rate in self.models: + predictions += learning_rate * model.predict(features) + return np.sign(predictions) # Convert to binary predictions (-1 or 1) + + def gradient(self, target: np.ndarray, y_pred: np.ndarray) -> np.ndarray: + """ + Calculate the negative gradient (pseudo-residuals) for logistic loss. + + Parameters: + - target (np.ndarray): The target values. + - y_pred (np.ndarray): The predicted values. + + Returns: + - np.ndarray: An array of pseudo-residuals. 
+ + >>> import numpy as np + >>> clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1) + >>> target = np.array([0, 1, 0, 1]) + >>> y_pred = np.array([0.2, 0.8, 0.3, 0.7]) + >>> residuals = clf.gradient(target, y_pred) + >>> # Check if residuals have the correct shape + >>> residuals.shape == target.shape + True + """ + return -target / (1 + np.exp(target * y_pred)) + + +if __name__ == "__main__": + iris = load_iris() + X, y = iris.data, iris.target + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.2, random_state=42 + ) + + clf = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1) + clf.fit(X_train, y_train) + + y_pred = clf.predict(X_test) + accuracy = accuracy_score(y_test, y_pred) + print(f"Accuracy: {accuracy:.2f}") From 1e1ee00782d300c22e3d7a425ace5d0c7cefb200 Mon Sep 17 00:00:00 2001 From: Manmita Das <34617961+manmita@users.noreply.github.com> Date: Sat, 28 Oct 2023 03:18:15 +0530 Subject: [PATCH 254/306] Excess 3 code (#11001) * added excess-3 code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated with fixes * updated with fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update excess_3_code.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- bit_manipulation/excess_3_code.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 bit_manipulation/excess_3_code.py diff --git a/bit_manipulation/excess_3_code.py b/bit_manipulation/excess_3_code.py new file mode 100644 index 000000000..7beaabd90 --- /dev/null +++ b/bit_manipulation/excess_3_code.py @@ -0,0 +1,27 @@ +def excess_3_code(number: int) -> str: + """ + Find excess-3 code of integer base 10. + Add 3 to all digits in a decimal number then convert to a binary-coded decimal. 
+ https://en.wikipedia.org/wiki/Excess-3 + + >>> excess_3_code(0) + '0b0011' + >>> excess_3_code(3) + '0b0110' + >>> excess_3_code(2) + '0b0101' + >>> excess_3_code(20) + '0b01010011' + >>> excess_3_code(120) + '0b010001010011' + """ + num = "" + for digit in str(max(0, number)): + num += str(bin(int(digit) + 3))[2:].zfill(4) + return "0b" + num + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f2436318cef6dba173282f83def4ebf7bd1d2aba Mon Sep 17 00:00:00 2001 From: Shreya123714 <95279016+Shreya123714@users.noreply.github.com> Date: Sun, 29 Oct 2023 00:02:12 +0530 Subject: [PATCH 255/306] Add FuzzySet Class for Triangular Fuzzy Sets (#11036) * Added Opertation for triangular fuzzy sets * Added Sources * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix the bug , for which test were failing * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add type hints and improve parameter names * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add Test For fuzzy_operations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix the bug in fuzzy_operations.py * Add test_fuzzy_logic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix the bug in fuzzy_operations.py & deleted test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed the typo error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Again done * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * corrected wrong intendation due to which test fail * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * 
[pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * bug fixed * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add test_fuzzy_logic * Modified fuzzy_operations.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed the bug, made a FuzzySet dataclass * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Replaced assertEqual of unittest to assert python * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * lets see * Changed test * orderd the import statements * Add docstring and dataclass the FuzzySet * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fuzzy_operations.py * Delete fuzzy_logic/test_fuzzy_logic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * https://en.wikipedia.org/wiki/Fuzzy_set --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- fuzzy_logic/fuzzy_operations.py | 195 ++++++++++++++++++++++++++++++++ 1 file changed, 195 insertions(+) create mode 100644 fuzzy_logic/fuzzy_operations.py diff --git a/fuzzy_logic/fuzzy_operations.py b/fuzzy_logic/fuzzy_operations.py new file mode 100644 index 000000000..e41cd2120 --- /dev/null +++ b/fuzzy_logic/fuzzy_operations.py @@ -0,0 +1,195 @@ +""" +By @Shreya123714 + +https://en.wikipedia.org/wiki/Fuzzy_set +""" + +from __future__ import annotations + +from dataclasses import dataclass + +import matplotlib.pyplot as plt +import numpy as np + + +@dataclass +class FuzzySet: + """ + A class for representing and manipulating triangular fuzzy sets. 
+ Attributes: + name: The name or label of the fuzzy set. + left_boundary: The left boundary of the fuzzy set. + peak: The peak (central) value of the fuzzy set. + right_boundary: The right boundary of the fuzzy set. + Methods: + membership(x): Calculate the membership value of an input 'x' in the fuzzy set. + union(other): Calculate the union of this fuzzy set with another fuzzy set. + intersection(other): Calculate the intersection of this fuzzy set with another. + complement(): Calculate the complement (negation) of this fuzzy set. + plot(): Plot the membership function of the fuzzy set. + + >>> sheru = FuzzySet("Sheru", 0.4, 1, 0.6) + >>> sheru + FuzzySet(name='Sheru', left_boundary=0.4, peak=1, right_boundary=0.6) + >>> str(sheru) + 'Sheru: [0.4, 1, 0.6]' + + >>> siya = FuzzySet("Siya", 0.5, 1, 0.7) + >>> siya + FuzzySet(name='Siya', left_boundary=0.5, peak=1, right_boundary=0.7) + + # Complement Operation + >>> sheru.complement() + FuzzySet(name='¬Sheru', left_boundary=0.4, peak=0.6, right_boundary=0) + >>> siya.complement() # doctest: +NORMALIZE_WHITESPACE + FuzzySet(name='¬Siya', left_boundary=0.30000000000000004, peak=0.5, + right_boundary=0) + + # Intersection Operation + >>> siya.intersection(sheru) + FuzzySet(name='Siya ∩ Sheru', left_boundary=0.5, peak=0.6, right_boundary=1.0) + + # Membership Operation + >>> sheru.membership(0.5) + 0.16666666666666663 + >>> sheru.membership(0.6) + 0.0 + + # Union Operations + >>> siya.union(sheru) + FuzzySet(name='Siya ∪ Sheru', left_boundary=0.4, peak=0.7, right_boundary=1.0) + """ + + name: str + left_boundary: float + peak: float + right_boundary: float + + def __str__(self) -> str: + """ + >>> FuzzySet("fuzzy_set", 0.1, 0.2, 0.3) + FuzzySet(name='fuzzy_set', left_boundary=0.1, peak=0.2, right_boundary=0.3) + """ + return ( + f"{self.name}: [{self.left_boundary}, {self.peak}, {self.right_boundary}]" + ) + + def complement(self) -> FuzzySet: + """ + Calculate the complement (negation) of this fuzzy set. 
+ Returns: + FuzzySet: A new fuzzy set representing the complement. + + >>> FuzzySet("fuzzy_set", 0.1, 0.2, 0.3).complement() + FuzzySet(name='¬fuzzy_set', left_boundary=0.7, peak=0.9, right_boundary=0.8) + """ + return FuzzySet( + f"¬{self.name}", + 1 - self.right_boundary, + 1 - self.left_boundary, + 1 - self.peak, + ) + + def intersection(self, other) -> FuzzySet: + """ + Calculate the intersection of this fuzzy set + with another fuzzy set. + Args: + other: Another fuzzy set to intersect with. + Returns: + A new fuzzy set representing the intersection. + + >>> FuzzySet("a", 0.1, 0.2, 0.3).intersection(FuzzySet("b", 0.4, 0.5, 0.6)) + FuzzySet(name='a ∩ b', left_boundary=0.4, peak=0.3, right_boundary=0.35) + """ + return FuzzySet( + f"{self.name} ∩ {other.name}", + max(self.left_boundary, other.left_boundary), + min(self.right_boundary, other.right_boundary), + (self.peak + other.peak) / 2, + ) + + def membership(self, x: float) -> float: + """ + Calculate the membership value of an input 'x' in the fuzzy set. + Returns: + The membership value of 'x' in the fuzzy set. + + >>> a = FuzzySet("a", 0.1, 0.2, 0.3) + >>> a.membership(0.09) + 0.0 + >>> a.membership(0.1) + 0.0 + >>> a.membership(0.11) + 0.09999999999999995 + >>> a.membership(0.4) + 0.0 + >>> FuzzySet("A", 0, 0.5, 1).membership(0.1) + 0.2 + >>> FuzzySet("B", 0.2, 0.7, 1).membership(0.6) + 0.8 + """ + if x <= self.left_boundary or x >= self.right_boundary: + return 0.0 + elif self.left_boundary < x <= self.peak: + return (x - self.left_boundary) / (self.peak - self.left_boundary) + elif self.peak < x < self.right_boundary: + return (self.right_boundary - x) / (self.right_boundary - self.peak) + msg = f"Invalid value {x} for fuzzy set {self}" + raise ValueError(msg) + + def union(self, other) -> FuzzySet: + """ + Calculate the union of this fuzzy set with another fuzzy set. + Args: + other (FuzzySet): Another fuzzy set to union with. + Returns: + FuzzySet: A new fuzzy set representing the union. 
+ + >>> FuzzySet("a", 0.1, 0.2, 0.3).union(FuzzySet("b", 0.4, 0.5, 0.6)) + FuzzySet(name='a ∪ b', left_boundary=0.1, peak=0.6, right_boundary=0.35) + """ + return FuzzySet( + f"{self.name} ∪ {other.name}", + min(self.left_boundary, other.left_boundary), + max(self.right_boundary, other.right_boundary), + (self.peak + other.peak) / 2, + ) + + def plot(self): + """ + Plot the membership function of the fuzzy set. + """ + x = np.linspace(0, 1, 1000) + y = [self.membership(xi) for xi in x] + + plt.plot(x, y, label=self.name) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + a = FuzzySet("A", 0, 0.5, 1) + b = FuzzySet("B", 0.2, 0.7, 1) + + a.plot() + b.plot() + + plt.xlabel("x") + plt.ylabel("Membership") + plt.legend() + plt.show() + + union_ab = a.union(b) + intersection_ab = a.intersection(b) + complement_a = a.complement() + + union_ab.plot() + intersection_ab.plot() + complement_a.plot() + + plt.xlabel("x") + plt.ylabel("Membership") + plt.legend() + plt.show() From b51b833e0a0339421c76ee53662521689b1c9d62 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 29 Oct 2023 01:13:20 +0530 Subject: [PATCH 256/306] Added doctest to heap.py (#11059) --- data_structures/heap/heap.py | 75 ++++++++++++++++++++++++++++++++++-- 1 file changed, 72 insertions(+), 3 deletions(-) diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index c1004f349..29bff3af0 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -81,6 +81,9 @@ class Heap(Generic[T]): def max_heapify(self, index: int) -> None: """ correct a single violation of the heap property in a subtree's root. + + It is the function that is responsible for restoring the property + of Max heap i.e the maximum element is always at top. 
""" if index < self.heap_size: violation: int = index @@ -99,7 +102,29 @@ class Heap(Generic[T]): self.max_heapify(violation) def build_max_heap(self, collection: Iterable[T]) -> None: - """build max heap from an unsorted array""" + """ + build max heap from an unsorted array + + >>> h = Heap() + >>> h.build_max_heap([20,40,50,20,10]) + >>> h + [50, 40, 20, 20, 10] + + >>> h = Heap() + >>> h.build_max_heap([1,2,3,4,5,6,7,8,9,0]) + >>> h + [9, 8, 7, 4, 5, 6, 3, 2, 1, 0] + + >>> h = Heap() + >>> h.build_max_heap([514,5,61,57,8,99,105]) + >>> h + [514, 57, 105, 5, 8, 99, 61] + + >>> h = Heap() + >>> h.build_max_heap([514,5,61.6,57,8,9.9,105]) + >>> h + [514, 57, 105, 5, 8, 9.9, 61.6] + """ self.h = list(collection) self.heap_size = len(self.h) if self.heap_size > 1: @@ -108,7 +133,24 @@ class Heap(Generic[T]): self.max_heapify(i) def extract_max(self) -> T: - """get and remove max from heap""" + """ + get and remove max from heap + + >>> h = Heap() + >>> h.build_max_heap([20,40,50,20,10]) + >>> h.extract_max() + 50 + + >>> h = Heap() + >>> h.build_max_heap([514,5,61,57,8,99,105]) + >>> h.extract_max() + 514 + + >>> h = Heap() + >>> h.build_max_heap([1,2,3,4,5,6,7,8,9,0]) + >>> h.extract_max() + 9 + """ if self.heap_size >= 2: me = self.h[0] self.h[0] = self.h.pop(-1) @@ -122,7 +164,34 @@ class Heap(Generic[T]): raise Exception("Empty heap") def insert(self, value: T) -> None: - """insert a new value into the max heap""" + """ + insert a new value into the max heap + + >>> h = Heap() + >>> h.insert(10) + >>> h + [10] + + >>> h = Heap() + >>> h.insert(10) + >>> h.insert(10) + >>> h + [10, 10] + + >>> h = Heap() + >>> h.insert(10) + >>> h.insert(10.1) + >>> h + [10.1, 10] + + >>> h = Heap() + >>> h.insert(0.1) + >>> h.insert(0) + >>> h.insert(9) + >>> h.insert(5) + >>> h + [9, 5, 0.1, 0] + """ self.h.append(value) idx = (self.heap_size - 1) // 2 self.heap_size += 1 From d80ee90178d48e530a2df3966fee3b5e06ec3ecc Mon Sep 17 00:00:00 2001 From: Khushi Shukla Date: Sun, 29 
Oct 2023 02:43:14 +0530 Subject: [PATCH 257/306] Create crossword_puzzle_solver.py (#11011) * Create crossword_puzzle_solver.py * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * Update backtracking/crossword_puzzle_solver.py * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update crossword_puzzle_solver.py * Apply suggestions from code review * Update crossword_puzzle_solver.py * Update crossword_puzzle_solver.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- backtracking/crossword_puzzle_solver.py | 132 ++++++++++++++++++++++++ 1 file changed, 132 insertions(+) create 
mode 100644 backtracking/crossword_puzzle_solver.py diff --git a/backtracking/crossword_puzzle_solver.py b/backtracking/crossword_puzzle_solver.py new file mode 100644 index 000000000..b9c01c4ef --- /dev/null +++ b/backtracking/crossword_puzzle_solver.py @@ -0,0 +1,132 @@ +# https://www.geeksforgeeks.org/solve-crossword-puzzle/ + + +def is_valid( + puzzle: list[list[str]], word: str, row: int, col: int, vertical: bool +) -> bool: + """ + Check if a word can be placed at the given position. + + >>> puzzle = [ + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''] + ... ] + >>> is_valid(puzzle, 'word', 0, 0, True) + True + >>> puzzle = [ + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''] + ... ] + >>> is_valid(puzzle, 'word', 0, 0, False) + True + """ + for i in range(len(word)): + if vertical: + if row + i >= len(puzzle) or puzzle[row + i][col] != "": + return False + else: + if col + i >= len(puzzle[0]) or puzzle[row][col + i] != "": + return False + return True + + +def place_word( + puzzle: list[list[str]], word: str, row: int, col: int, vertical: bool +) -> None: + """ + Place a word at the given position. + + >>> puzzle = [ + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''] + ... ] + >>> place_word(puzzle, 'word', 0, 0, True) + >>> puzzle + [['w', '', '', ''], ['o', '', '', ''], ['r', '', '', ''], ['d', '', '', '']] + """ + for i, char in enumerate(word): + if vertical: + puzzle[row + i][col] = char + else: + puzzle[row][col + i] = char + + +def remove_word( + puzzle: list[list[str]], word: str, row: int, col: int, vertical: bool +) -> None: + """ + Remove a word from the given position. + + >>> puzzle = [ + ... ['w', '', '', ''], + ... ['o', '', '', ''], + ... ['r', '', '', ''], + ... ['d', '', '', ''] + ... 
] + >>> remove_word(puzzle, 'word', 0, 0, True) + >>> puzzle + [['', '', '', ''], ['', '', '', ''], ['', '', '', ''], ['', '', '', '']] + """ + for i in range(len(word)): + if vertical: + puzzle[row + i][col] = "" + else: + puzzle[row][col + i] = "" + + +def solve_crossword(puzzle: list[list[str]], words: list[str]) -> bool: + """ + Solve the crossword puzzle using backtracking. + + >>> puzzle = [ + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''] + ... ] + + >>> words = ['word', 'four', 'more', 'last'] + >>> solve_crossword(puzzle, words) + True + >>> puzzle = [ + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''], + ... ['', '', '', ''] + ... ] + >>> words = ['word', 'four', 'more', 'paragraphs'] + >>> solve_crossword(puzzle, words) + False + """ + for row in range(len(puzzle)): + for col in range(len(puzzle[0])): + if puzzle[row][col] == "": + for word in words: + for vertical in [True, False]: + if is_valid(puzzle, word, row, col, vertical): + place_word(puzzle, word, row, col, vertical) + words.remove(word) + if solve_crossword(puzzle, words): + return True + words.append(word) + remove_word(puzzle, word, row, col, vertical) + return False + return True + + +if __name__ == "__main__": + PUZZLE = [[""] * 3 for _ in range(3)] + WORDS = ["cat", "dog", "car"] + + if solve_crossword(PUZZLE, WORDS): + print("Solution found:") + for row in PUZZLE: + print(" ".join(row)) + else: + print("No solution found:") From 444dfb0a0f7b1e9b0b2f171b426dca26bcd1937a Mon Sep 17 00:00:00 2001 From: Ravi Kumar <119737193+ravi-ivar-7@users.noreply.github.com> Date: Sun, 29 Oct 2023 03:42:17 +0530 Subject: [PATCH 258/306] Added adams-bashforth method of order 2, 3, 4, 5 (#10969) * added runge kutta gills method * added adams-bashforth method of order 2, 3, 4, 5 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update adams_bashforth.py * Deleted extraneous file, 
maths/numerical_analysis/runge_kutta_gills.py * Added doctests to each function adams_bashforth.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update adams_bashforth.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/numerical_analysis/adams_bashforth.py | 230 ++++++++++++++++++++ 1 file changed, 230 insertions(+) create mode 100644 maths/numerical_analysis/adams_bashforth.py diff --git a/maths/numerical_analysis/adams_bashforth.py b/maths/numerical_analysis/adams_bashforth.py new file mode 100644 index 000000000..d61f022a4 --- /dev/null +++ b/maths/numerical_analysis/adams_bashforth.py @@ -0,0 +1,230 @@ +""" +Use the Adams-Bashforth methods to solve Ordinary Differential Equations. + +https://en.wikipedia.org/wiki/Linear_multistep_method +Author : Ravi Kumar +""" +from collections.abc import Callable +from dataclasses import dataclass + +import numpy as np + + +@dataclass +class AdamsBashforth: + """ + args: + func: An ordinary differential equation (ODE) as function of x and y. + x_initials: List containing initial required values of x. + y_initials: List containing initial required values of y. + step_size: The increment value of x. + x_final: The final value of x. + + Returns: Solution of y at each nodal point + + >>> def f(x, y): + ... return x + y + >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0.2, 1], 0.2, 1) # doctest: +ELLIPSIS + AdamsBashforth(func=..., x_initials=[0, 0.2, 0.4], y_initials=[0, 0.2, 1], step...) + >>> AdamsBashforth(f, [0, 0.2, 1], [0, 0, 0.04], 0.2, 1).step_2() + Traceback (most recent call last): + ... + ValueError: The final value of x must be greater than the initial values of x. + + >>> AdamsBashforth(f, [0, 0.2, 0.3], [0, 0, 0.04], 0.2, 1).step_3() + Traceback (most recent call last): + ... + ValueError: x-values must be equally spaced according to step size. 
+ + >>> AdamsBashforth(f,[0,0.2,0.4,0.6,0.8],[0,0,0.04,0.128,0.307],-0.2,1).step_5() + Traceback (most recent call last): + ... + ValueError: Step size must be positive. + """ + + func: Callable[[float, float], float] + x_initials: list[float] + y_initials: list[float] + step_size: float + x_final: float + + def __post_init__(self) -> None: + if self.x_initials[-1] >= self.x_final: + raise ValueError( + "The final value of x must be greater than the initial values of x." + ) + + if self.step_size <= 0: + raise ValueError("Step size must be positive.") + + if not all( + round(x1 - x0, 10) == self.step_size + for x0, x1 in zip(self.x_initials, self.x_initials[1:]) + ): + raise ValueError("x-values must be equally spaced according to step size.") + + def step_2(self) -> np.ndarray: + """ + >>> def f(x, y): + ... return x + >>> AdamsBashforth(f, [0, 0.2], [0, 0], 0.2, 1).step_2() + array([0. , 0. , 0.06, 0.16, 0.3 , 0.48]) + + >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_2() + Traceback (most recent call last): + ... + ValueError: Insufficient initial points information. + """ + + if len(self.x_initials) != 2 or len(self.y_initials) != 2: + raise ValueError("Insufficient initial points information.") + + x_0, x_1 = self.x_initials[:2] + y_0, y_1 = self.y_initials[:2] + + n = int((self.x_final - x_1) / self.step_size) + y = np.zeros(n + 2) + y[0] = y_0 + y[1] = y_1 + + for i in range(n): + y[i + 2] = y[i + 1] + (self.step_size / 2) * ( + 3 * self.func(x_1, y[i + 1]) - self.func(x_0, y[i]) + ) + x_0 = x_1 + x_1 += self.step_size + + return y + + def step_3(self) -> np.ndarray: + """ + >>> def f(x, y): + ... return x + y + >>> y = AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_3() + >>> y[3] + 0.15533333333333332 + + >>> AdamsBashforth(f, [0, 0.2], [0, 0], 0.2, 1).step_3() + Traceback (most recent call last): + ... + ValueError: Insufficient initial points information. 
+ """ + if len(self.x_initials) != 3 or len(self.y_initials) != 3: + raise ValueError("Insufficient initial points information.") + + x_0, x_1, x_2 = self.x_initials[:3] + y_0, y_1, y_2 = self.y_initials[:3] + + n = int((self.x_final - x_2) / self.step_size) + y = np.zeros(n + 4) + y[0] = y_0 + y[1] = y_1 + y[2] = y_2 + + for i in range(n + 1): + y[i + 3] = y[i + 2] + (self.step_size / 12) * ( + 23 * self.func(x_2, y[i + 2]) + - 16 * self.func(x_1, y[i + 1]) + + 5 * self.func(x_0, y[i]) + ) + x_0 = x_1 + x_1 = x_2 + x_2 += self.step_size + + return y + + def step_4(self) -> np.ndarray: + """ + >>> def f(x,y): + ... return x + y + >>> y = AdamsBashforth( + ... f, [0, 0.2, 0.4, 0.6], [0, 0, 0.04, 0.128], 0.2, 1).step_4() + >>> y[4] + 0.30699999999999994 + >>> y[5] + 0.5771083333333333 + + >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_4() + Traceback (most recent call last): + ... + ValueError: Insufficient initial points information. + """ + + if len(self.x_initials) != 4 or len(self.y_initials) != 4: + raise ValueError("Insufficient initial points information.") + + x_0, x_1, x_2, x_3 = self.x_initials[:4] + y_0, y_1, y_2, y_3 = self.y_initials[:4] + + n = int((self.x_final - x_3) / self.step_size) + y = np.zeros(n + 4) + y[0] = y_0 + y[1] = y_1 + y[2] = y_2 + y[3] = y_3 + + for i in range(n): + y[i + 4] = y[i + 3] + (self.step_size / 24) * ( + 55 * self.func(x_3, y[i + 3]) + - 59 * self.func(x_2, y[i + 2]) + + 37 * self.func(x_1, y[i + 1]) + - 9 * self.func(x_0, y[i]) + ) + x_0 = x_1 + x_1 = x_2 + x_2 = x_3 + x_3 += self.step_size + + return y + + def step_5(self) -> np.ndarray: + """ + >>> def f(x,y): + ... return x + y + >>> y = AdamsBashforth( + ... f, [0, 0.2, 0.4, 0.6, 0.8], [0, 0.02140, 0.02140, 0.22211, 0.42536], + ... 0.2, 1).step_5() + >>> y[-1] + 0.05436839444444452 + + >>> AdamsBashforth(f, [0, 0.2, 0.4], [0, 0, 0.04], 0.2, 1).step_5() + Traceback (most recent call last): + ... + ValueError: Insufficient initial points information. 
+ """ + + if len(self.x_initials) != 5 or len(self.y_initials) != 5: + raise ValueError("Insufficient initial points information.") + + x_0, x_1, x_2, x_3, x_4 = self.x_initials[:5] + y_0, y_1, y_2, y_3, y_4 = self.y_initials[:5] + + n = int((self.x_final - x_4) / self.step_size) + y = np.zeros(n + 6) + y[0] = y_0 + y[1] = y_1 + y[2] = y_2 + y[3] = y_3 + y[4] = y_4 + + for i in range(n + 1): + y[i + 5] = y[i + 4] + (self.step_size / 720) * ( + 1901 * self.func(x_4, y[i + 4]) + - 2774 * self.func(x_3, y[i + 3]) + - 2616 * self.func(x_2, y[i + 2]) + - 1274 * self.func(x_1, y[i + 1]) + + 251 * self.func(x_0, y[i]) + ) + x_0 = x_1 + x_1 = x_2 + x_2 = x_3 + x_3 = x_4 + x_4 += self.step_size + + return y + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From aa5c97d72c2382ed07c54b17d0b0d74684ca4734 Mon Sep 17 00:00:00 2001 From: Tapas Singhal <98687345+Shocker-lov-t@users.noreply.github.com> Date: Sun, 29 Oct 2023 04:17:46 +0530 Subject: [PATCH 259/306] Create ipv4_conversion.py (#11008) * Create ipconversion.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update conversions/ipconversion.py * Update ipconversion.py * Rename ipconversion.py to ipv4_conversion.py * forward_propagation(32, 450_000) # Was 10_000_000 --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- conversions/ipv4_conversion.py | 85 +++++++++++++++++++++++++ neural_network/simple_neural_network.py | 2 +- 2 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 conversions/ipv4_conversion.py diff --git a/conversions/ipv4_conversion.py b/conversions/ipv4_conversion.py new file mode 100644 index 000000000..862309b72 --- /dev/null +++ b/conversions/ipv4_conversion.py @@ -0,0 +1,85 @@ +# https://www.geeksforgeeks.org/convert-ip-address-to-integer-and-vice-versa/ + + +def ipv4_to_decimal(ipv4_address: str) -> int: + """ + Convert an IPv4 
address to its decimal representation. + + Args: + ip_address: A string representing an IPv4 address (e.g., "192.168.0.1"). + + Returns: + int: The decimal representation of the IP address. + + >>> ipv4_to_decimal("192.168.0.1") + 3232235521 + >>> ipv4_to_decimal("10.0.0.255") + 167772415 + >>> ipv4_to_decimal("10.0.255") + Traceback (most recent call last): + ... + ValueError: Invalid IPv4 address format + >>> ipv4_to_decimal("10.0.0.256") + Traceback (most recent call last): + ... + ValueError: Invalid IPv4 octet 256 + """ + + octets = [int(octet) for octet in ipv4_address.split(".")] + if len(octets) != 4: + raise ValueError("Invalid IPv4 address format") + + decimal_ipv4 = 0 + for octet in octets: + if not 0 <= octet <= 255: + raise ValueError(f"Invalid IPv4 octet {octet}") # noqa: EM102 + decimal_ipv4 = (decimal_ipv4 << 8) + int(octet) + + return decimal_ipv4 + + +def alt_ipv4_to_decimal(ipv4_address: str) -> int: + """ + >>> alt_ipv4_to_decimal("192.168.0.1") + 3232235521 + >>> alt_ipv4_to_decimal("10.0.0.255") + 167772415 + """ + return int("0x" + "".join(f"{int(i):02x}" for i in ipv4_address.split(".")), 16) + + +def decimal_to_ipv4(decimal_ipv4: int) -> str: + """ + Convert a decimal representation of an IP address to its IPv4 format. + + Args: + decimal_ipv4: An integer representing the decimal IP address. + + Returns: + The IPv4 representation of the decimal IP address. + + >>> decimal_to_ipv4(3232235521) + '192.168.0.1' + >>> decimal_to_ipv4(167772415) + '10.0.0.255' + >>> decimal_to_ipv4(-1) + Traceback (most recent call last): + ... 
+ ValueError: Invalid decimal IPv4 address + """ + + if not (0 <= decimal_ipv4 <= 4294967295): + raise ValueError("Invalid decimal IPv4 address") + + ip_parts = [] + for _ in range(4): + ip_parts.append(str(decimal_ipv4 & 255)) + decimal_ipv4 >>= 8 + + return ".".join(reversed(ip_parts)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() diff --git a/neural_network/simple_neural_network.py b/neural_network/simple_neural_network.py index f2a323487..8751a3890 100644 --- a/neural_network/simple_neural_network.py +++ b/neural_network/simple_neural_network.py @@ -28,7 +28,7 @@ INITIAL_VALUE = 0.02 def forward_propagation(expected: int, number_propagations: int) -> float: """Return the value found after the forward propagation training. - >>> res = forward_propagation(32, 10000000) + >>> res = forward_propagation(32, 450_000) # Was 10_000_000 >>> res > 31 and res < 33 True From e3eb9daba41512280dd54205c532874ccd2f1b91 Mon Sep 17 00:00:00 2001 From: Ed Date: Sat, 28 Oct 2023 15:48:50 -0700 Subject: [PATCH 260/306] Add bitap_string_match algo (#11060) * Add bitap_string_match algo * Fix types * Fix spelling and add ignore word * Add suggested changes and change return type * Resolve suggestions --- pyproject.toml | 2 +- strings/bitap_string_match.py | 79 +++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 1 deletion(-) create mode 100644 strings/bitap_string_match.py diff --git a/pyproject.toml b/pyproject.toml index 790a328b3..5d27142d1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -135,5 +135,5 @@ omit = [ sort = "Cover" [tool.codespell] -ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" +ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" diff --git 
a/strings/bitap_string_match.py b/strings/bitap_string_match.py new file mode 100644 index 000000000..bd8a0f0d7 --- /dev/null +++ b/strings/bitap_string_match.py @@ -0,0 +1,79 @@ +""" +Bitap exact string matching +https://en.wikipedia.org/wiki/Bitap_algorithm + +Searches for a pattern inside text, and returns the index of the first occurrence +of the pattern. Both text and pattern consist of lowercase alphabetical characters only. + +Complexity: O(m*n) + n = length of text + m = length of pattern + +Python doctests can be run using this command: +python3 -m doctest -v bitap_string_match.py +""" + + +def bitap_string_match(text: str, pattern: str) -> int: + """ + Retrieves the index of the first occurrence of pattern in text. + + Args: + text: A string consisting only of lowercase alphabetical characters. + pattern: A string consisting only of lowercase alphabetical characters. + + Returns: + int: The index where pattern first occurs. Return -1 if not found. + + >>> bitap_string_match('abdabababc', 'ababc') + 5 + >>> bitap_string_match('aaaaaaaaaaaaaaaaaa', 'a') + 0 + >>> bitap_string_match('zxywsijdfosdfnso', 'zxywsijdfosdfnso') + 0 + >>> bitap_string_match('abdabababc', '') + 0 + >>> bitap_string_match('abdabababc', 'c') + 9 + >>> bitap_string_match('abdabababc', 'fofosdfo') + -1 + >>> bitap_string_match('abdab', 'fofosdfo') + -1 + """ + if not pattern: + return 0 + m = len(pattern) + if m > len(text): + return -1 + + # Initial state of bit string 1110 + state = ~1 + # Bit = 0 if character appears at index, and 1 otherwise + pattern_mask: list[int] = [~0] * 27 # 1111 + + for i, char in enumerate(pattern): + # For the pattern mask for this character, set the bit to 0 for each i + # the character appears. + pattern_index: int = ord(char) - ord("a") + pattern_mask[pattern_index] &= ~(1 << i) + + for i, char in enumerate(text): + text_index = ord(char) - ord("a") + # If this character does not appear in pattern, it's pattern mask is 1111. 
+ # Performing a bitwise OR between state and 1111 will reset the state to 1111 + # and start searching the start of pattern again. + state |= pattern_mask[text_index] + state <<= 1 + + # If the mth bit (counting right to left) of the state is 0, then we have + # found pattern in text + if (state & (1 << m)) == 0: + return i - m + 1 + + return -1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 50195616817983e8c820daf41c252ecbabac0ae2 Mon Sep 17 00:00:00 2001 From: Tapas Singhal <98687345+Shocker-lov-t@users.noreply.github.com> Date: Sun, 29 Oct 2023 13:12:32 +0530 Subject: [PATCH 261/306] Create multiplexer.py (#11064) * Create multiplexer.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Doctests should show how the algorithm fails --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- boolean_algebra/multiplexer.py | 42 ++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 boolean_algebra/multiplexer.py diff --git a/boolean_algebra/multiplexer.py b/boolean_algebra/multiplexer.py new file mode 100644 index 000000000..7e65c785c --- /dev/null +++ b/boolean_algebra/multiplexer.py @@ -0,0 +1,42 @@ +def mux(input0: int, input1: int, select: int) -> int: + """ + Implement a 2-to-1 Multiplexer. + + :param input0: The first input value (0 or 1). + :param input1: The second input value (0 or 1). + :param select: The select signal (0 or 1) to choose between input0 and input1. + :return: The output based on the select signal. input1 if select else input0. + + https://www.electrically4u.com/solved-problems-on-multiplexer + https://en.wikipedia.org/wiki/Multiplexer + + >>> mux(0, 1, 0) + 0 + >>> mux(0, 1, 1) + 1 + >>> mux(1, 0, 0) + 1 + >>> mux(1, 0, 1) + 0 + >>> mux(2, 1, 0) + Traceback (most recent call last): + ... 
+ ValueError: Inputs and select signal must be 0 or 1 + >>> mux(0, -1, 0) + Traceback (most recent call last): + ... + ValueError: Inputs and select signal must be 0 or 1 + >>> mux(0, 1, 1.1) + Traceback (most recent call last): + ... + ValueError: Inputs and select signal must be 0 or 1 + """ + if all(i in (0, 1) for i in (input0, input1, select)): + return input1 if select else input0 + raise ValueError("Inputs and select signal must be 0 or 1") + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From bad39cd15439f4adeab06707c7ceab2de85adb7f Mon Sep 17 00:00:00 2001 From: ojas wani <52542740+ojas-wani@users.noreply.github.com> Date: Sun, 29 Oct 2023 02:37:07 -0700 Subject: [PATCH 262/306] Add more doctest to intro_sort.py #9943 (#11068) * added laplacian_filter file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * required changes to laplacian file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed 
laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian_filter.py * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed laplacian_filter.py * changed laplacian_filter.py * changed laplacian_filter.py * add matrix_multiplication.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update matrix_multiplication * update matrix_multiplication * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * make changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update * update * updates * resolve conflict * add doctest * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * make changes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update laplacian.py * add doctests * more doctest added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * try to resolve ruff error * try to reslve ruff error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update doctest * attemp - resolve ruff error * resolve build error * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * resolve build issue * 
[pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update build * doctest update * update doctest * update doctest * update doctest * fix ruff error * file location changed * Delete digital_image_processing/filters/laplacian_filter.py * Create laplacian_filter.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Update matrix_multiplication_recursion.py * Add doctest to median_of_3 * add doctest to median_of_3 function * Update intro_sort.py * Update sorts/intro_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/intro_sort.py | 31 +++++++++++-------------------- 1 file changed, 11 insertions(+), 20 deletions(-) diff --git a/sorts/intro_sort.py b/sorts/intro_sort.py index f0e3645ad..908d28865 100644 --- a/sorts/intro_sort.py +++ b/sorts/intro_sort.py @@ -1,5 +1,5 @@ """ -Introspective Sort is hybrid sort (Quick Sort + Heap Sort + Insertion Sort) +Introspective Sort is a hybrid sort (Quick Sort + Heap Sort + Insertion Sort) if the size of the list is under 16, use insertion sort https://en.wikipedia.org/wiki/Introsort """ @@ -9,7 +9,6 @@ import math def insertion_sort(array: list, start: int = 0, end: int = 0) -> list: """ >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - >>> insertion_sort(array, 0, len(array)) [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] """ @@ -27,8 +26,7 @@ def insertion_sort(array: list, start: int = 0, end: int = 0) -> list: def heapify(array: list, index: int, heap_size: int) -> None: # Max Heap """ >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - - >>> heapify(array, len(array) // 2 ,len(array)) + >>> heapify(array, len(array) // 2, len(array)) """ largest = index left_index = 2 * index + 1 # Left Node @@ -47,9 
+45,7 @@ def heapify(array: list, index: int, heap_size: int) -> None: # Max Heap def heap_sort(array: list) -> list: """ - >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - - >>> heap_sort(array) + >>> heap_sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] """ n = len(array) @@ -69,9 +65,14 @@ def median_of_3( ) -> int: """ >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - - >>> median_of_3(array, 0, 0 + ((len(array) - 0) // 2) + 1, len(array) - 1) + >>> median_of_3(array, 0, ((len(array) - 0) // 2) + 1, len(array) - 1) 12 + >>> array = [13, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] + >>> median_of_3(array, 0, ((len(array) - 0) // 2) + 1, len(array) - 1) + 13 + >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 15, 14, 27, 79, 23, 45, 14, 16] + >>> median_of_3(array, 0, ((len(array) - 0) // 2) + 1, len(array) - 1) + 14 """ if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] @@ -88,7 +89,6 @@ def median_of_3( def partition(array: list, low: int, high: int, pivot: int) -> int: """ >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - >>> partition(array, 0, len(array), 12) 8 """ @@ -115,22 +115,16 @@ def sort(array: list) -> list: Examples: >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] - >>> sort([-1, -5, -3, -13, -44]) [-44, -13, -5, -3, -1] - >>> sort([]) [] - >>> sort([5]) [5] - >>> sort([-3, 0, -7, 6, 23, -34]) [-34, -7, -3, 0, 6, 23] - >>> sort([1.7, 1.0, 3.3, 2.1, 0.3 ]) [0.3, 1.0, 1.7, 2.1, 3.3] - >>> sort(['d', 'a', 'b', 'e', 'c']) ['a', 'b', 'c', 'd', 'e'] """ @@ -146,9 +140,7 @@ def intro_sort( ) -> list: """ >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] - >>> max_depth = 2 * math.ceil(math.log2(len(array))) - >>> intro_sort(array, 0, len(array), 16, max_depth) [1, 2, 4, 6, 7, 8, 8, 12, 
14, 14, 22, 23, 27, 45, 56, 79] """ @@ -167,7 +159,6 @@ if __name__ == "__main__": import doctest doctest.testmod() - user_input = input("Enter numbers separated by a comma : ").strip() unsorted = [float(item) for item in user_input.split(",")] - print(sort(unsorted)) + print(f"{sort(unsorted) = }") From adb13a106389aa2382a6315e9f008f9f855a89f8 Mon Sep 17 00:00:00 2001 From: SEIKH NABAB UDDIN <93948993+nababuddin@users.noreply.github.com> Date: Sun, 29 Oct 2023 15:22:50 +0530 Subject: [PATCH 263/306] Update instagram_pic.py (#10957) * Update instagram_pic.py * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * Update instagram_pic.py * Update instagram_pic.py * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update instagram_pic.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fast fail instead of nested ifs and PEP8: Keep try/except blocks small * Update instagram_pic.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- web_programming/instagram_pic.py | 51 +++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git a/web_programming/instagram_pic.py b/web_programming/instagram_pic.py index 
8521da674..2630c8659 100644 --- a/web_programming/instagram_pic.py +++ b/web_programming/instagram_pic.py @@ -3,14 +3,45 @@ from datetime import datetime import requests from bs4 import BeautifulSoup -if __name__ == "__main__": - url = input("Enter image url: ").strip() - print(f"Downloading image from {url} ...") - soup = BeautifulSoup(requests.get(url).content, "html.parser") - # The image URL is in the content field of the first meta tag with property og:image - image_url = soup.find("meta", {"property": "og:image"})["content"] - image_data = requests.get(image_url).content + +def download_image(url: str) -> str: + """ + Download an image from a given URL by scraping the 'og:image' meta tag. + + Parameters: + url: The URL to scrape. + + Returns: + A message indicating the result of the operation. + """ + try: + response = requests.get(url) + response.raise_for_status() + except requests.exceptions.RequestException as e: + return f"An error occurred during the HTTP request to {url}: {e!r}" + + soup = BeautifulSoup(response.text, "html.parser") + image_meta_tag = soup.find("meta", {"property": "og:image"}) + if not image_meta_tag: + return "No meta tag with property 'og:image' was found." + + image_url = image_meta_tag.get("content") + if not image_url: + return f"Image URL not found in meta tag {image_meta_tag}." + + try: + image_data = requests.get(image_url).content + except requests.exceptions.RequestException as e: + return f"An error occurred during the HTTP request to {image_url}: {e!r}" + if not image_data: + return f"Failed to download the image from {image_url}." + file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg" - with open(file_name, "wb") as fp: - fp.write(image_data) - print(f"Done. 
Image saved to disk as {file_name}.") + with open(file_name, "wb") as out_file: + out_file.write(image_data) + return f"Image downloaded and saved in the file {file_name}" + + +if __name__ == "__main__": + url = input("Enter image URL: ").strip() or "https://www.instagram.com" + print(f"download_image({url}): {download_image(url)}") From 8217f9bd35e5975e3660217b37b2aac62c1280da Mon Sep 17 00:00:00 2001 From: Tapas Singhal <98687345+Shocker-lov-t@users.noreply.github.com> Date: Sun, 29 Oct 2023 15:55:39 +0530 Subject: [PATCH 264/306] Create find_previous_power_of_two.py (#11004) * Create find_previous_power_of_two.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update find_previous_power_of_two.py This change avoids the unnecessary left shift operation * Update find_previous_power_of_two.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../find_previous_power_of_two.py | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 bit_manipulation/find_previous_power_of_two.py diff --git a/bit_manipulation/find_previous_power_of_two.py b/bit_manipulation/find_previous_power_of_two.py new file mode 100644 index 000000000..8ac74ac98 --- /dev/null +++ b/bit_manipulation/find_previous_power_of_two.py @@ -0,0 +1,30 @@ +def find_previous_power_of_two(number: int) -> int: + """ + Find the largest power of two that is less than or equal to a given integer. + https://stackoverflow.com/questions/1322510 + + >>> [find_previous_power_of_two(i) for i in range(18)] + [0, 1, 2, 2, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16, 16] + >>> find_previous_power_of_two(-5) + Traceback (most recent call last): + ... + ValueError: Input must be a non-negative integer + >>> find_previous_power_of_two(10.5) + Traceback (most recent call last): + ... 
+ ValueError: Input must be a non-negative integer + """ + if not isinstance(number, int) or number < 0: + raise ValueError("Input must be a non-negative integer") + if number == 0: + return 0 + power = 1 + while power <= number: + power <<= 1 # Equivalent to multiplying by 2 + return power >> 1 if number > 1 else 1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 403d3b8a29e754b8f3bbb1000a54fee42a87341b Mon Sep 17 00:00:00 2001 From: Aqib Javid Bhat Date: Sun, 29 Oct 2023 16:28:28 +0530 Subject: [PATCH 265/306] Add Integer Square Root Algorithm (#10949) * Add Integer Square Root Algorithm * Update integer_square_root.py * Update integer_square_root.py --------- Co-authored-by: Christian Clauss --- maths/integer_square_root.py | 73 ++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 maths/integer_square_root.py diff --git a/maths/integer_square_root.py b/maths/integer_square_root.py new file mode 100644 index 000000000..27e874a43 --- /dev/null +++ b/maths/integer_square_root.py @@ -0,0 +1,73 @@ +""" +Integer Square Root Algorithm -- An efficient method to calculate the square root of a +non-negative integer 'num' rounded down to the nearest integer. It uses a binary search +approach to find the integer square root without using any built-in exponent functions +or operators. +* https://en.wikipedia.org/wiki/Integer_square_root +* https://docs.python.org/3/library/math.html#math.isqrt +Note: + - This algorithm is designed for non-negative integers only. + - The result is rounded down to the nearest integer. + - The algorithm has a time complexity of O(log(x)). + - Original algorithm idea based on binary search. +""" + + +def integer_square_root(num: int) -> int: + """ + Returns the integer square root of a non-negative integer num. + Args: + num: A non-negative integer. + Returns: + The integer square root of num. + Raises: + ValueError: If num is not an integer or is negative. 
+ >>> [integer_square_root(i) for i in range(18)] + [0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4] + >>> integer_square_root(625) + 25 + >>> integer_square_root(2_147_483_647) + 46340 + >>> from math import isqrt + >>> all(integer_square_root(i) == isqrt(i) for i in range(20)) + True + >>> integer_square_root(-1) + Traceback (most recent call last): + ... + ValueError: num must be non-negative integer + >>> integer_square_root(1.5) + Traceback (most recent call last): + ... + ValueError: num must be non-negative integer + >>> integer_square_root("0") + Traceback (most recent call last): + ... + ValueError: num must be non-negative integer + """ + if not isinstance(num, int) or num < 0: + raise ValueError("num must be non-negative integer") + + if num < 2: + return num + + left_bound = 0 + right_bound = num // 2 + + while left_bound <= right_bound: + mid = left_bound + (right_bound - left_bound) // 2 + mid_squared = mid * mid + if mid_squared == num: + return mid + + if mid_squared < num: + left_bound = mid + 1 + else: + right_bound = mid - 1 + + return right_bound + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From cc22d0b0bac9fec13913ba07bc67d58c06482c83 Mon Sep 17 00:00:00 2001 From: aayushsoni4 <120650736+aayushsoni4@users.noreply.github.com> Date: Sun, 29 Oct 2023 18:25:31 +0530 Subject: [PATCH 266/306] Generate parentheses (#10903) * Add: Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * Changes made in Matrix 
Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Matrix Prefix Sum * Add: Distinct Subsequences * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Distinct Subsequences * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Changes made in Distinct Subsequences * Changes made in Distinct Subsequences * Changes made in Distinct Subsequences * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed Distinct Subsequences * Add: Generate Parentheses * Add: Generate Parentheses * [pre-commit.ci] auto fixes from 
pre-commit.com hooks for more information, see https://pre-commit.ci * Add: Generate Parentheses * Add: Generate Parentheses * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add: Generate Parentheses * Add: Generate Parentheses * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update backtracking/generate_parentheses.py * Delete matrix/matrix_prefix_sum.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- backtracking/generate_parentheses.py | 77 ++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 backtracking/generate_parentheses.py diff --git a/backtracking/generate_parentheses.py b/backtracking/generate_parentheses.py new file mode 100644 index 000000000..18c21e2a9 --- /dev/null +++ b/backtracking/generate_parentheses.py @@ -0,0 +1,77 @@ +""" +author: Aayush Soni +Given n pairs of parentheses, write a function to generate all +combinations of well-formed parentheses. +Input: n = 2 +Output: ["(())","()()"] +Leetcode link: https://leetcode.com/problems/generate-parentheses/description/ +""" + + +def backtrack( + partial: str, open_count: int, close_count: int, n: int, result: list[str] +) -> None: + """ + Generate valid combinations of balanced parentheses using recursion. + + :param partial: A string representing the current combination. + :param open_count: An integer representing the count of open parentheses. + :param close_count: An integer representing the count of close parentheses. + :param n: An integer representing the total number of pairs. + :param result: A list to store valid combinations. + :return: None + + This function uses recursion to explore all possible combinations, + ensuring that at each step, the parentheses remain balanced. 
+ + Example: + >>> result = [] + >>> backtrack("", 0, 0, 2, result) + >>> result + ['(())', '()()'] + """ + if len(partial) == 2 * n: + # When the combination is complete, add it to the result. + result.append(partial) + return + + if open_count < n: + # If we can add an open parenthesis, do so, and recurse. + backtrack(partial + "(", open_count + 1, close_count, n, result) + + if close_count < open_count: + # If we can add a close parenthesis (it won't make the combination invalid), + # do so, and recurse. + backtrack(partial + ")", open_count, close_count + 1, n, result) + + +def generate_parenthesis(n: int) -> list[str]: + """ + Generate valid combinations of balanced parentheses for a given n. + + :param n: An integer representing the number of pairs of parentheses. + :return: A list of strings with valid combinations. + + This function uses a recursive approach to generate the combinations. + + Time Complexity: O(2^(2n)) - In the worst case, we have 2^(2n) combinations. + Space Complexity: O(n) - where 'n' is the number of pairs. + + Example 1: + >>> generate_parenthesis(3) + ['((()))', '(()())', '(())()', '()(())', '()()()'] + + Example 2: + >>> generate_parenthesis(1) + ['()'] + """ + + result: list[str] = [] + backtrack("", 0, 0, n, result) + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7c1dfec08644e4034717844b139e8db948706ccc Mon Sep 17 00:00:00 2001 From: Farzad Hayat Date: Sun, 29 Oct 2023 22:57:04 +1000 Subject: [PATCH 267/306] XOR Cipher: doctests and bug fixes (#10840) * Fixed bug with key modulus wrapping. Should be wrapping on 256, not 255. * Fixed bug with incorrect assertion type in decrypt function. 
* Added doctests for 4 out of 6 methods --- ciphers/xor_cipher.py | 91 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 82 insertions(+), 9 deletions(-) diff --git a/ciphers/xor_cipher.py b/ciphers/xor_cipher.py index 559036d30..e30955d41 100644 --- a/ciphers/xor_cipher.py +++ b/ciphers/xor_cipher.py @@ -35,6 +35,22 @@ class XORCipher: output: encrypted string 'content' as a list of chars if key not passed the method uses the key by the constructor. otherwise key = 1 + + Empty list + >>> XORCipher().encrypt("", 5) + [] + + One key + >>> XORCipher().encrypt("hallo welt", 1) + ['i', '`', 'm', 'm', 'n', '!', 'v', 'd', 'm', 'u'] + + Normal key + >>> XORCipher().encrypt("HALLO WELT", 32) + ['h', 'a', 'l', 'l', 'o', '\\x00', 'w', 'e', 'l', 't'] + + Key greater than 255 + >>> XORCipher().encrypt("hallo welt", 256) + ['h', 'a', 'l', 'l', 'o', ' ', 'w', 'e', 'l', 't'] """ # precondition @@ -44,7 +60,7 @@ class XORCipher: key = key or self.__key or 1 # make sure key is an appropriate size - key %= 255 + key %= 256 return [chr(ord(ch) ^ key) for ch in content] @@ -54,16 +70,32 @@ class XORCipher: output: decrypted string 'content' as a list of chars if key not passed the method uses the key by the constructor. 
otherwise key = 1 + + Empty list + >>> XORCipher().decrypt("", 5) + [] + + One key + >>> XORCipher().decrypt("hallo welt", 1) + ['i', '`', 'm', 'm', 'n', '!', 'v', 'd', 'm', 'u'] + + Normal key + >>> XORCipher().decrypt("HALLO WELT", 32) + ['h', 'a', 'l', 'l', 'o', '\\x00', 'w', 'e', 'l', 't'] + + Key greater than 255 + >>> XORCipher().decrypt("hallo welt", 256) + ['h', 'a', 'l', 'l', 'o', ' ', 'w', 'e', 'l', 't'] """ # precondition assert isinstance(key, int) - assert isinstance(content, list) + assert isinstance(content, str) key = key or self.__key or 1 # make sure key is an appropriate size - key %= 255 + key %= 256 return [chr(ord(ch) ^ key) for ch in content] @@ -73,6 +105,22 @@ class XORCipher: output: encrypted string 'content' if key not passed the method uses the key by the constructor. otherwise key = 1 + + Empty list + >>> XORCipher().encrypt_string("", 5) + '' + + One key + >>> XORCipher().encrypt_string("hallo welt", 1) + 'i`mmn!vdmu' + + Normal key + >>> XORCipher().encrypt_string("HALLO WELT", 32) + 'hallo\\x00welt' + + Key greater than 255 + >>> XORCipher().encrypt_string("hallo welt", 256) + 'hallo welt' """ # precondition @@ -81,9 +129,8 @@ class XORCipher: key = key or self.__key or 1 - # make sure key can be any size - while key > 255: - key -= 255 + # make sure key is an appropriate size + key %= 256 # This will be returned ans = "" @@ -99,6 +146,22 @@ class XORCipher: output: decrypted string 'content' if key not passed the method uses the key by the constructor. 
otherwise key = 1 + + Empty list + >>> XORCipher().decrypt_string("", 5) + '' + + One key + >>> XORCipher().decrypt_string("hallo welt", 1) + 'i`mmn!vdmu' + + Normal key + >>> XORCipher().decrypt_string("HALLO WELT", 32) + 'hallo\\x00welt' + + Key greater than 255 + >>> XORCipher().decrypt_string("hallo welt", 256) + 'hallo welt' """ # precondition @@ -107,9 +170,8 @@ class XORCipher: key = key or self.__key or 1 - # make sure key can be any size - while key > 255: - key -= 255 + # make sure key is an appropriate size + key %= 256 # This will be returned ans = "" @@ -132,6 +194,9 @@ class XORCipher: assert isinstance(file, str) assert isinstance(key, int) + # make sure key is an appropriate size + key %= 256 + try: with open(file) as fin, open("encrypt.out", "w+") as fout: # actual encrypt-process @@ -156,6 +221,9 @@ class XORCipher: assert isinstance(file, str) assert isinstance(key, int) + # make sure key is an appropriate size + key %= 256 + try: with open(file) as fin, open("decrypt.out", "w+") as fout: # actual encrypt-process @@ -168,6 +236,11 @@ class XORCipher: return True +if __name__ == "__main__": + from doctest import testmod + + testmod() + # Tests # crypt = XORCipher() # key = 67 From 6b588e4d44085d8f2a60b023f09558442ea7ae91 Mon Sep 17 00:00:00 2001 From: Kento <75509362+nkstonks@users.noreply.github.com> Date: Sun, 29 Oct 2023 23:57:40 +1100 Subject: [PATCH 268/306] Added doctests for fibonacci.py (#10836) * added other possible cases * added test for correct output of truth table * few fibonacci tests added * updating DIRECTORY.md * Update nor_gate.py * updating DIRECTORY.md * Update fibonacci.py removed whitespace * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: = <=> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- 
maths/fibonacci.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index e810add69..8cdd6cdb1 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -81,6 +81,18 @@ def fib_recursive(n: int) -> list[int]: def fib_recursive_term(i: int) -> int: """ Calculates the i-th (0-indexed) Fibonacci number using recursion + >>> fib_recursive_term(0) + 0 + >>> fib_recursive_term(1) + 1 + >>> fib_recursive_term(5) + 5 + >>> fib_recursive_term(10) + 55 + >>> fib_recursive_term(-1) + Traceback (most recent call last): + ... + Exception: n is negative """ if i < 0: raise Exception("n is negative") @@ -197,6 +209,10 @@ def fib_binet(n: int) -> list[int]: if __name__ == "__main__": + import doctest + + doctest.testmod() + num = 30 time_func(fib_iterative, num) time_func(fib_recursive, num) # Around 3s runtime From d59cf1734fd8216d90fa21ed579e18a41b63755f Mon Sep 17 00:00:00 2001 From: Arshdeep Singh Sachdeva Date: Sun, 29 Oct 2023 07:55:37 -0700 Subject: [PATCH 269/306] Add running key cipher (#10834) * Add running key cipher * update running key cipher add doctests and hints * Add test case * Update return value * range(len()) is almost always a hint to use enumerate() --------- Co-authored-by: Christian Clauss --- ciphers/running_key_cipher.py | 75 +++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) create mode 100644 ciphers/running_key_cipher.py diff --git a/ciphers/running_key_cipher.py b/ciphers/running_key_cipher.py new file mode 100644 index 000000000..6bda417be --- /dev/null +++ b/ciphers/running_key_cipher.py @@ -0,0 +1,75 @@ +""" +https://en.wikipedia.org/wiki/Running_key_cipher +""" + + +def running_key_encrypt(key: str, plaintext: str) -> str: + """ + Encrypts the plaintext using the Running Key Cipher. + + :param key: The running key (long piece of text). + :param plaintext: The plaintext to be encrypted. + :return: The ciphertext. 
+ """ + plaintext = plaintext.replace(" ", "").upper() + key = key.replace(" ", "").upper() + key_length = len(key) + ciphertext = [] + ord_a = ord("A") + + for i, char in enumerate(plaintext): + p = ord(char) - ord_a + k = ord(key[i % key_length]) - ord_a + c = (p + k) % 26 + ciphertext.append(chr(c + ord_a)) + + return "".join(ciphertext) + + +def running_key_decrypt(key: str, ciphertext: str) -> str: + """ + Decrypts the ciphertext using the Running Key Cipher. + + :param key: The running key (long piece of text). + :param ciphertext: The ciphertext to be decrypted. + :return: The plaintext. + """ + ciphertext = ciphertext.replace(" ", "").upper() + key = key.replace(" ", "").upper() + key_length = len(key) + plaintext = [] + ord_a = ord("A") + + for i, char in enumerate(ciphertext): + c = ord(char) - ord_a + k = ord(key[i % key_length]) - ord_a + p = (c - k) % 26 + plaintext.append(chr(p + ord_a)) + + return "".join(plaintext) + + +def test_running_key_encrypt() -> None: + """ + >>> key = "How does the duck know that? said Victor" + >>> ciphertext = running_key_encrypt(key, "DEFEND THIS") + >>> running_key_decrypt(key, ciphertext) == "DEFENDTHIS" + True + """ + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + test_running_key_encrypt() + + plaintext = input("Enter the plaintext: ").upper() + print(f"\n{plaintext = }") + + key = "How does the duck know that? 
said Victor" + encrypted_text = running_key_encrypt(key, plaintext) + print(f"{encrypted_text = }") + + decrypted_text = running_key_decrypt(key, encrypted_text) + print(f"{decrypted_text = }") From 3ad90cea831ee12d9c168735cbd6fab3acac446f Mon Sep 17 00:00:00 2001 From: dragon <51738561+08183080@users.noreply.github.com> Date: Sun, 29 Oct 2023 23:40:01 +0800 Subject: [PATCH 270/306] add a yield method to fibonaci (#10826) * add a yiled method to fibonaci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fibonaci * Update fibonacci.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update fibonacci.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/fibonacci.py | 79 ++++++++++++++++++++++++++++++---------------- 1 file changed, 51 insertions(+), 28 deletions(-) diff --git a/maths/fibonacci.py b/maths/fibonacci.py index 8cdd6cdb1..927700b04 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -1,4 +1,3 @@ -# fibonacci.py """ Calculates the Fibonacci sequence using iteration, recursion, memoization, and a simplified form of Binet's formula @@ -9,14 +8,12 @@ the Binet's formula function because the Binet formula function uses floats NOTE 2: the Binet's formula function is much more limited in the size of inputs that it can handle due to the size limitations of Python floats -RESULTS: (n = 20) -fib_iterative runtime: 0.0055 ms -fib_recursive runtime: 6.5627 ms -fib_memoization runtime: 0.0107 ms -fib_binet runtime: 0.0174 ms +See benchmark numbers in __main__ for performance comparisons/ +https://en.wikipedia.org/wiki/Fibonacci_number for more information """ import functools +from collections.abc import Iterator from math import sqrt from time import time @@ -35,6 +32,31 @@ def time_func(func, *args, **kwargs): return output +def fib_iterative_yield(n: int) 
-> Iterator[int]: + """ + Calculates the first n (1-indexed) Fibonacci numbers using iteration with yield + >>> list(fib_iterative_yield(0)) + [0] + >>> tuple(fib_iterative_yield(1)) + (0, 1) + >>> tuple(fib_iterative_yield(5)) + (0, 1, 1, 2, 3, 5) + >>> tuple(fib_iterative_yield(10)) + (0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55) + >>> tuple(fib_iterative_yield(-1)) + Traceback (most recent call last): + ... + ValueError: n is negative + """ + if n < 0: + raise ValueError("n is negative") + a, b = 0, 1 + yield a + for _ in range(n): + yield b + a, b = b, a + b + + def fib_iterative(n: int) -> list[int]: """ Calculates the first n (0-indexed) Fibonacci numbers using iteration @@ -49,10 +71,10 @@ def fib_iterative(n: int) -> list[int]: >>> fib_iterative(-1) Traceback (most recent call last): ... - Exception: n is negative + ValueError: n is negative """ if n < 0: - raise Exception("n is negative") + raise ValueError("n is negative") if n == 0: return [0] fib = [0, 1] @@ -75,7 +97,7 @@ def fib_recursive(n: int) -> list[int]: >>> fib_iterative(-1) Traceback (most recent call last): ... - Exception: n is negative + ValueError: n is negative """ def fib_recursive_term(i: int) -> int: @@ -95,13 +117,13 @@ def fib_recursive(n: int) -> list[int]: Exception: n is negative """ if i < 0: - raise Exception("n is negative") + raise ValueError("n is negative") if i < 2: return i return fib_recursive_term(i - 1) + fib_recursive_term(i - 2) if n < 0: - raise Exception("n is negative") + raise ValueError("n is negative") return [fib_recursive_term(i) for i in range(n + 1)] @@ -119,7 +141,7 @@ def fib_recursive_cached(n: int) -> list[int]: >>> fib_iterative(-1) Traceback (most recent call last): ... 
- Exception: n is negative + ValueError: n is negative """ @functools.cache @@ -128,13 +150,13 @@ def fib_recursive_cached(n: int) -> list[int]: Calculates the i-th (0-indexed) Fibonacci number using recursion """ if i < 0: - raise Exception("n is negative") + raise ValueError("n is negative") if i < 2: return i return fib_recursive_term(i - 1) + fib_recursive_term(i - 2) if n < 0: - raise Exception("n is negative") + raise ValueError("n is negative") return [fib_recursive_term(i) for i in range(n + 1)] @@ -152,10 +174,10 @@ def fib_memoization(n: int) -> list[int]: >>> fib_iterative(-1) Traceback (most recent call last): ... - Exception: n is negative + ValueError: n is negative """ if n < 0: - raise Exception("n is negative") + raise ValueError("n is negative") # Cache must be outside recursuive function # other it will reset every time it calls itself. cache: dict[int, int] = {0: 0, 1: 1, 2: 1} # Prefilled cache @@ -193,29 +215,30 @@ def fib_binet(n: int) -> list[int]: >>> fib_binet(-1) Traceback (most recent call last): ... - Exception: n is negative + ValueError: n is negative >>> fib_binet(1475) Traceback (most recent call last): ... 
- Exception: n is too large + ValueError: n is too large """ if n < 0: - raise Exception("n is negative") + raise ValueError("n is negative") if n >= 1475: - raise Exception("n is too large") + raise ValueError("n is too large") sqrt_5 = sqrt(5) phi = (1 + sqrt_5) / 2 return [round(phi**i / sqrt_5) for i in range(n + 1)] if __name__ == "__main__": - import doctest - - doctest.testmod() + from doctest import testmod + testmod() + # Time on an M1 MacBook Pro -- Fastest to slowest num = 30 - time_func(fib_iterative, num) - time_func(fib_recursive, num) # Around 3s runtime - time_func(fib_recursive_cached, num) # Around 0ms runtime - time_func(fib_memoization, num) - time_func(fib_binet, num) + time_func(fib_iterative_yield, num) # 0.0012 ms + time_func(fib_iterative, num) # 0.0031 ms + time_func(fib_binet, num) # 0.0062 ms + time_func(fib_memoization, num) # 0.0100 ms + time_func(fib_recursive_cached, num) # 0.0153 ms + time_func(fib_recursive, num) # 257.0910 ms From 67c85ee289b66f9c8ac02c6732240965eec879a2 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 29 Oct 2023 21:31:54 +0530 Subject: [PATCH 271/306] Added doctest to hash_map.py (#11082) * Added doctest to heap.py * Added doctest to hash_map.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update hash_map.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/hashing/hash_map.py | 111 +++++++++++++++++++++++++++- 1 file changed, 110 insertions(+), 1 deletion(-) diff --git a/data_structures/hashing/hash_map.py b/data_structures/hashing/hash_map.py index 1dfcc8bbf..1689e07af 100644 --- a/data_structures/hashing/hash_map.py +++ b/data_structures/hashing/hash_map.py @@ -54,6 +54,14 @@ class HashMap(MutableMapping[KEY, VAL]): Get next index. Implements linear open addressing. 
+ >>> HashMap(5)._get_next_ind(3) + 4 + >>> HashMap(5)._get_next_ind(5) + 1 + >>> HashMap(5)._get_next_ind(6) + 2 + >>> HashMap(5)._get_next_ind(9) + 0 """ return (ind + 1) % len(self._buckets) @@ -82,6 +90,14 @@ class HashMap(MutableMapping[KEY, VAL]): Return true if we have reached safe capacity. So we need to increase the number of buckets to avoid collisions. + + >>> hm = HashMap(2) + >>> hm._add_item(1, 10) + >>> hm._add_item(2, 20) + >>> hm._is_full() + True + >>> HashMap(2)._is_full() + False """ limit = len(self._buckets) * self._capacity_factor return len(self) >= int(limit) @@ -114,17 +130,104 @@ class HashMap(MutableMapping[KEY, VAL]): ind = self._get_next_ind(ind) def _add_item(self, key: KEY, val: VAL) -> None: + """ + Try to add 3 elements when the size is 5 + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm._add_item(2, 20) + >>> hm._add_item(3, 30) + >>> hm + HashMap(1: 10, 2: 20, 3: 30) + + Try to add 3 elements when the size is 5 + >>> hm = HashMap(5) + >>> hm._add_item(-5, 10) + >>> hm._add_item(6, 30) + >>> hm._add_item(-7, 20) + >>> hm + HashMap(-5: 10, 6: 30, -7: 20) + + Try to add 3 elements when size is 1 + >>> hm = HashMap(1) + >>> hm._add_item(10, 13.2) + >>> hm._add_item(6, 5.26) + >>> hm._add_item(7, 5.155) + >>> hm + HashMap(10: 13.2) + + Trying to add an element with a key that is a floating point value + >>> hm = HashMap(5) + >>> hm._add_item(1.5, 10) + >>> hm + HashMap(1.5: 10) + + 5. Trying to add an item with the same key + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm._add_item(1, 20) + >>> hm + HashMap(1: 20) + """ for ind in self._iterate_buckets(key): if self._try_set(ind, key, val): break def __setitem__(self, key: KEY, val: VAL) -> None: + """ + 1. Changing value of item whose key is present + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm.__setitem__(1, 20) + >>> hm + HashMap(1: 20) + + 2. 
Changing value of item whose key is not present + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm.__setitem__(0, 20) + >>> hm + HashMap(0: 20, 1: 10) + + 3. Changing the value of the same item multiple times + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm.__setitem__(1, 20) + >>> hm.__setitem__(1, 30) + >>> hm + HashMap(1: 30) + """ if self._is_full(): self._size_up() self._add_item(key, val) def __delitem__(self, key: KEY) -> None: + """ + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm._add_item(2, 20) + >>> hm._add_item(3, 30) + >>> hm.__delitem__(3) + >>> hm + HashMap(1: 10, 2: 20) + >>> hm = HashMap(5) + >>> hm._add_item(-5, 10) + >>> hm._add_item(6, 30) + >>> hm._add_item(-7, 20) + >>> hm.__delitem__(-5) + >>> hm + HashMap(6: 30, -7: 20) + + # Trying to remove a non-existing item + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm._add_item(2, 20) + >>> hm._add_item(3, 30) + >>> hm.__delitem__(4) + Traceback (most recent call last): + ... + KeyError: 4 + """ for ind in self._iterate_buckets(key): item = self._buckets[ind] if item is None: @@ -156,7 +259,13 @@ class HashMap(MutableMapping[KEY, VAL]): yield from (item.key for item in self._buckets if item) def __repr__(self) -> str: - val_string = " ,".join( + val_string = ", ".join( f"{item.key}: {item.val}" for item in self._buckets if item ) return f"HashMap({val_string})" + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From be60f42a5fe29c0e83a049a803f17992bf66be47 Mon Sep 17 00:00:00 2001 From: Aqib Javid Bhat Date: Sun, 29 Oct 2023 22:12:41 +0530 Subject: [PATCH 272/306] Add Josephus Problem (#10928) * Add Josephus Problem * Add iterative implementation of Josephus Problem * Add descriptive variable names * Update maths/josephus_problem.py * Update josephus_problem.py --------- Co-authored-by: Christian Clauss --- maths/josephus_problem.py | 130 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 130 insertions(+) create mode 100644 
maths/josephus_problem.py diff --git a/maths/josephus_problem.py b/maths/josephus_problem.py new file mode 100644 index 000000000..271292ba1 --- /dev/null +++ b/maths/josephus_problem.py @@ -0,0 +1,130 @@ +""" +The Josephus problem is a famous theoretical problem related to a certain +counting-out game. This module provides functions to solve the Josephus problem +for num_people and a step_size. + +The Josephus problem is defined as follows: +- num_people are standing in a circle. +- Starting with a specified person, you count around the circle, + skipping a fixed number of people (step_size). +- The person at which you stop counting is eliminated from the circle. +- The counting continues until only one person remains. + +For more information about the Josephus problem, refer to: +https://en.wikipedia.org/wiki/Josephus_problem +""" + + +def josephus_recursive(num_people: int, step_size: int) -> int: + """ + Solve the Josephus problem for num_people and a step_size recursively. + + Args: + num_people: A positive integer representing the number of people. + step_size: A positive integer representing the step size for elimination. + + Returns: + The position of the last person remaining. + + Raises: + ValueError: If num_people or step_size is not a positive integer. + + Examples: + >>> josephus_recursive(7, 3) + 3 + >>> josephus_recursive(10, 2) + 4 + >>> josephus_recursive(0, 2) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + >>> josephus_recursive(1.9, 2) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + >>> josephus_recursive(-2, 2) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + >>> josephus_recursive(7, 0) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. 
+ >>> josephus_recursive(7, -2) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + >>> josephus_recursive(1_000, 0.01) + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + >>> josephus_recursive("cat", "dog") + Traceback (most recent call last): + ... + ValueError: num_people or step_size is not a positive integer. + """ + if ( + not isinstance(num_people, int) + or not isinstance(step_size, int) + or num_people <= 0 + or step_size <= 0 + ): + raise ValueError("num_people or step_size is not a positive integer.") + + if num_people == 1: + return 0 + + return (josephus_recursive(num_people - 1, step_size) + step_size) % num_people + + +def find_winner(num_people: int, step_size: int) -> int: + """ + Find the winner of the Josephus problem for num_people and a step_size. + + Args: + num_people (int): Number of people. + step_size (int): Step size for elimination. + + Returns: + int: The position of the last person remaining (1-based index). + + Examples: + >>> find_winner(7, 3) + 4 + >>> find_winner(10, 2) + 5 + """ + return josephus_recursive(num_people, step_size) + 1 + + +def josephus_iterative(num_people: int, step_size: int) -> int: + """ + Solve the Josephus problem for num_people and a step_size iteratively. + + Args: + num_people (int): The number of people in the circle. + step_size (int): The number of steps to take before eliminating someone. + + Returns: + int: The position of the last person standing. 
+ + Examples: + >>> josephus_iterative(5, 2) + 3 + >>> josephus_iterative(7, 3) + 4 + """ + circle = list(range(1, num_people + 1)) + current = 0 + + while len(circle) > 1: + current = (current + step_size - 1) % len(circle) + circle.pop(current) + + return circle[0] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From eafdb8b86697eb9dbdc03916679719dff2f6425a Mon Sep 17 00:00:00 2001 From: dahhou ilyas <110790236+dahhou-ilyas@users.noreply.github.com> Date: Sun, 29 Oct 2023 18:24:37 +0100 Subject: [PATCH 273/306] Dahhou ilyas (#10058) * add new programme in dynamique programming wildcard_matching * add new programme in dynamique programming wildcard_matching * fix bug * fix * fix * fix * fix * fix * fix error recrusion * fix error recrusion * bug fix * add doctest * The power of enumerate() --------- Co-authored-by: Christian Clauss --- dynamic_programming/wildcard_matching.py | 92 +++++++++++++----------- 1 file changed, 49 insertions(+), 43 deletions(-) diff --git a/dynamic_programming/wildcard_matching.py b/dynamic_programming/wildcard_matching.py index 4ffc4b5d4..d9a139272 100644 --- a/dynamic_programming/wildcard_matching.py +++ b/dynamic_programming/wildcard_matching.py @@ -1,62 +1,68 @@ """ -Given two strings, an input string and a pattern, -this program checks if the input string matches the pattern. +Author : ilyas dahhou +Date : Oct 7, 2023 -Example : -input_string = "baaabab" -pattern = "*****ba*****ab" -Output: True +Task: +Given an input string and a pattern, implement wildcard pattern matching with support +for '?' and '*' where: +'?' matches any single character. +'*' matches any sequence of characters (including the empty sequence). +The matching should cover the entire input string (not partial). -This problem can be solved using the concept of "DYNAMIC PROGRAMMING". 
- -We create a 2D boolean matrix, where each entry match_matrix[i][j] is True -if the first i characters in input_string match the first j characters -of pattern. We initialize the first row and first column based on specific -rules, then fill up the rest of the matrix using a bottom-up dynamic -programming approach. - -The amount of match that will be determined is equal to match_matrix[n][m] -where n and m are lengths of the input_string and pattern respectively. +Runtime complexity: O(m * n) +The implementation was tested on the +leetcode: https://leetcode.com/problems/wildcard-matching/ """ -def is_pattern_match(input_string: str, pattern: str) -> bool: +def is_match(string: str, pattern: str) -> bool: """ - >>> is_pattern_match('baaabab','*****ba*****ba') + >>> is_match("", "") + True + >>> is_match("aa", "a") False - >>> is_pattern_match('baaabab','*****ba*****ab') + >>> is_match("abc", "abc") True - >>> is_pattern_match('aa','*') + >>> is_match("abc", "*c") + True + >>> is_match("abc", "a*") + True + >>> is_match("abc", "*a*") + True + >>> is_match("abc", "?b?") + True + >>> is_match("abc", "*?") + True + >>> is_match("abc", "a*d") + False + >>> is_match("abc", "a*c?") + False + >>> is_match('baaabab','*****ba*****ba') + False + >>> is_match('baaabab','*****ba*****ab') + True + >>> is_match('aa','*') True """ - - input_length = len(input_string) - pattern_length = len(pattern) - - match_matrix = [[False] * (pattern_length + 1) for _ in range(input_length + 1)] - - match_matrix[0][0] = True - - for j in range(1, pattern_length + 1): - if pattern[j - 1] == "*": - match_matrix[0][j] = match_matrix[0][j - 1] - - for i in range(1, input_length + 1): - for j in range(1, pattern_length + 1): - if pattern[j - 1] in ("?", input_string[i - 1]): - match_matrix[i][j] = match_matrix[i - 1][j - 1] + dp = [[False] * (len(pattern) + 1) for _ in string + "1"] + dp[0][0] = True + # Fill in the first row + for j, char in enumerate(pattern, 1): + if char == "*": + dp[0][j] = 
dp[0][j - 1] + # Fill in the rest of the DP table + for i, s_char in enumerate(string, 1): + for j, p_char in enumerate(pattern, 1): + if p_char in (s_char, "?"): + dp[i][j] = dp[i - 1][j - 1] elif pattern[j - 1] == "*": - match_matrix[i][j] = match_matrix[i - 1][j] or match_matrix[i][j - 1] - else: - match_matrix[i][j] = False - - return match_matrix[input_length][pattern_length] + dp[i][j] = dp[i - 1][j] or dp[i][j - 1] + return dp[len(string)][len(pattern)] if __name__ == "__main__": import doctest doctest.testmod() - - print(f"{is_pattern_match('baaabab','*****ba*****ab')}") + print(f"{is_match('baaabab','*****ba*****ab') = }") From 760d9bedc1a7ff06a75fafaeb519a5b1979a2885 Mon Sep 17 00:00:00 2001 From: Aryansh B Date: Mon, 30 Oct 2023 02:27:37 +0530 Subject: [PATCH 274/306] Added Fast Inverse Square Root (#11054) * Feat: Added Fast inverse square root * Fix: Added typehint * Fix: Added doctests that break the code, changed var name * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix: fixed length of docstring * Update fast_inverse_sqrt.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 10 +++++++ maths/fast_inverse_sqrt.py | 54 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 64 insertions(+) create mode 100644 maths/fast_inverse_sqrt.py diff --git a/DIRECTORY.md b/DIRECTORY.md index d108acf8d..9b2c8ce73 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -34,6 +34,7 @@ * [Bitwise Addition Recursive](bit_manipulation/bitwise_addition_recursive.py) * [Count 1S Brian Kernighan Method](bit_manipulation/count_1s_brian_kernighan_method.py) * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py) + * [Excess 3 Code](bit_manipulation/excess_3_code.py) * [Gray Code 
Sequence](bit_manipulation/gray_code_sequence.py) * [Highest Set Bit](bit_manipulation/highest_set_bit.py) * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) @@ -170,7 +171,10 @@ * Arrays * [Equilibrium Index In Array](data_structures/arrays/equilibrium_index_in_array.py) * [Find Triplets With 0 Sum](data_structures/arrays/find_triplets_with_0_sum.py) + * [Index 2D Array In 1D](data_structures/arrays/index_2d_array_in_1d.py) + * [Kth Largest Element](data_structures/arrays/kth_largest_element.py) * [Median Two Array](data_structures/arrays/median_two_array.py) + * [Monotonic Array](data_structures/arrays/monotonic_array.py) * [Pairs With Given Sum](data_structures/arrays/pairs_with_given_sum.py) * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) @@ -368,6 +372,7 @@ ## Electronics * [Apparent Power](electronics/apparent_power.py) * [Builtin Voltage](electronics/builtin_voltage.py) + * [Capacitor Equivalence](electronics/capacitor_equivalence.py) * [Carrier Concentration](electronics/carrier_concentration.py) * [Charging Capacitor](electronics/charging_capacitor.py) * [Charging Inductor](electronics/charging_inductor.py) @@ -531,12 +536,14 @@ ## Machine Learning * [Apriori Algorithm](machine_learning/apriori_algorithm.py) * [Astar](machine_learning/astar.py) + * [Automatic Differentiation](machine_learning/automatic_differentiation.py) * [Data Transformations](machine_learning/data_transformations.py) * [Decision Tree](machine_learning/decision_tree.py) * [Dimensionality Reduction](machine_learning/dimensionality_reduction.py) * Forecasting * [Run](machine_learning/forecasting/run.py) * [Frequent Pattern Growth](machine_learning/frequent_pattern_growth.py) + * [Gradient Boosting Classifier](machine_learning/gradient_boosting_classifier.py) * [Gradient Descent](machine_learning/gradient_descent.py) * [K Means Clust](machine_learning/k_means_clust.py) * [K Nearest 
Neighbours](machine_learning/k_nearest_neighbours.py) @@ -598,6 +605,7 @@ * [Extended Euclidean Algorithm](maths/extended_euclidean_algorithm.py) * [Factorial](maths/factorial.py) * [Factors](maths/factors.py) + * [Fast Inverse Sqrt](maths/fast_inverse_sqrt.py) * [Fermat Little Theorem](maths/fermat_little_theorem.py) * [Fibonacci](maths/fibonacci.py) * [Find Max](maths/find_max.py) @@ -648,6 +656,7 @@ * [Numerical Integration](maths/numerical_analysis/numerical_integration.py) * [Runge Kutta](maths/numerical_analysis/runge_kutta.py) * [Runge Kutta Fehlberg 45](maths/numerical_analysis/runge_kutta_fehlberg_45.py) + * [Runge Kutta Gills](maths/numerical_analysis/runge_kutta_gills.py) * [Secant Method](maths/numerical_analysis/secant_method.py) * [Simpson Rule](maths/numerical_analysis/simpson_rule.py) * [Square Root](maths/numerical_analysis/square_root.py) @@ -814,6 +823,7 @@ * [Ideal Gas Law](physics/ideal_gas_law.py) * [In Static Equilibrium](physics/in_static_equilibrium.py) * [Kinetic Energy](physics/kinetic_energy.py) + * [Lens Formulae](physics/lens_formulae.py) * [Lorentz Transformation Four Vector](physics/lorentz_transformation_four_vector.py) * [Malus Law](physics/malus_law.py) * [Mass Energy Equivalence](physics/mass_energy_equivalence.py) diff --git a/maths/fast_inverse_sqrt.py b/maths/fast_inverse_sqrt.py new file mode 100644 index 000000000..79385bb84 --- /dev/null +++ b/maths/fast_inverse_sqrt.py @@ -0,0 +1,54 @@ +""" +Fast inverse square root (1/sqrt(x)) using the Quake III algorithm. +Reference: https://en.wikipedia.org/wiki/Fast_inverse_square_root +Accuracy: https://en.wikipedia.org/wiki/Fast_inverse_square_root#Accuracy +""" + +import struct + + +def fast_inverse_sqrt(number: float) -> float: + """ + Compute the fast inverse square root of a floating-point number using the famous + Quake III algorithm. + + :param float number: Input number for which to calculate the inverse square root. 
+ :return float: The fast inverse square root of the input number. + + Example: + >>> fast_inverse_sqrt(10) + 0.3156857923527257 + >>> fast_inverse_sqrt(4) + 0.49915357479239103 + >>> fast_inverse_sqrt(4.1) + 0.4932849504615651 + >>> fast_inverse_sqrt(0) + Traceback (most recent call last): + ... + ValueError: Input must be a positive number. + >>> fast_inverse_sqrt(-1) + Traceback (most recent call last): + ... + ValueError: Input must be a positive number. + >>> from math import isclose, sqrt + >>> all(isclose(fast_inverse_sqrt(i), 1 / sqrt(i), rel_tol=0.00132) + ... for i in range(50, 60)) + True + """ + if number <= 0: + raise ValueError("Input must be a positive number.") + i = struct.unpack(">i", struct.pack(">f", number))[0] + i = 0x5F3759DF - (i >> 1) + y = struct.unpack(">f", struct.pack(">i", i))[0] + return y * (1.5 - 0.5 * number * y * y) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + # https://en.wikipedia.org/wiki/Fast_inverse_square_root#Accuracy + from math import sqrt + + for i in range(5, 101, 5): + print(f"{i:>3}: {(1 / sqrt(i)) - fast_inverse_sqrt(i):.5f}") From c7a1331b34d6644f546f049058c1d9738fbc9b4c Mon Sep 17 00:00:00 2001 From: Khushi Shukla Date: Mon, 30 Oct 2023 02:50:57 +0530 Subject: [PATCH 275/306] Create karnaugh_map_simplification.py (#11056) * Create karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * Update boolean_algebra/karnaugh_map_simplification.py 
Co-authored-by: Christian Clauss * Update karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update karnaugh_map_simplification.py * Update karnaugh_map_simplification.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../karnaugh_map_simplification.py | 55 +++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 boolean_algebra/karnaugh_map_simplification.py diff --git a/boolean_algebra/karnaugh_map_simplification.py b/boolean_algebra/karnaugh_map_simplification.py new file mode 100644 index 000000000..c7f2d4c6b --- /dev/null +++ b/boolean_algebra/karnaugh_map_simplification.py @@ -0,0 +1,55 @@ +""" +https://en.wikipedia.org/wiki/Karnaugh_map +https://www.allaboutcircuits.com/technical-articles/karnaugh-map-boolean-algebraic-simplification-technique +""" + + +def simplify_kmap(kmap: list[list[int]]) -> str: + """ + Simplify the Karnaugh map. + >>> simplify_kmap(kmap=[[0, 1], [1, 1]]) + "A'B + AB' + AB" + >>> simplify_kmap(kmap=[[0, 0], [0, 0]]) + '' + >>> simplify_kmap(kmap=[[0, 1], [1, -1]]) + "A'B + AB' + AB" + >>> simplify_kmap(kmap=[[0, 1], [1, 2]]) + "A'B + AB' + AB" + >>> simplify_kmap(kmap=[[0, 1], [1, 1.1]]) + "A'B + AB' + AB" + >>> simplify_kmap(kmap=[[0, 1], [1, 'a']]) + "A'B + AB' + AB" + """ + simplified_f = [] + for a, row in enumerate(kmap): + for b, item in enumerate(row): + if item: + term = ("A" if a else "A'") + ("B" if b else "B'") + simplified_f.append(term) + return " + ".join(simplified_f) + + +def main() -> None: + """ + Main function to create and simplify a K-Map. 
+ + >>> main() + [0, 1] + [1, 1] + Simplified Expression: + A'B + AB' + AB + """ + kmap = [[0, 1], [1, 1]] + + # Manually generate the product of [0, 1] and [0, 1] + + for row in kmap: + print(row) + + print("Simplified Expression:") + print(simplify_kmap(kmap)) + + +if __name__ == "__main__": + main() + print(f"{simplify_kmap(kmap=[[0, 1], [1, 1]]) = }") From 13e66c18d2738dd7a223c12ebbfc989faa4bcfce Mon Sep 17 00:00:00 2001 From: chien liu Date: Sun, 29 Oct 2023 22:22:19 +0100 Subject: [PATCH 276/306] Fix typo power_using_recursion.py (#11083) --- maths/power_using_recursion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/power_using_recursion.py b/maths/power_using_recursion.py index 462fc45bf..29283ca0f 100644 --- a/maths/power_using_recursion.py +++ b/maths/power_using_recursion.py @@ -43,7 +43,7 @@ def power(base: int, exponent: int) -> float: if __name__ == "__main__": - from doctests import testmod + from doctest import testmod testmod() print("Raise base to the power of exponent using recursion...") From 2531f8e221f04014821e16eb5eb1d3c52e5f174c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ab=C3=ADlio=20Azevedo?= Date: Sun, 29 Oct 2023 18:43:32 -0300 Subject: [PATCH 277/306] test: adding more tests to missing number algorithm (#10394) * test: adding more tests to missing number algorithm * Update missing_number.py --------- Co-authored-by: Christian Clauss --- bit_manipulation/missing_number.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py index 32b949daa..554887b17 100644 --- a/bit_manipulation/missing_number.py +++ b/bit_manipulation/missing_number.py @@ -11,6 +11,12 @@ def find_missing_number(nums: list[int]) -> int: Example: >>> find_missing_number([0, 1, 3, 4]) 2 + >>> find_missing_number([4, 3, 1, 0]) + 2 + >>> find_missing_number([-4, -3, -1, 0]) + -2 + >>> find_missing_number([-2, 2, 1, 3, 0]) + -1 >>> find_missing_number([1, 3, 4, 5, 6]) 
2 >>> find_missing_number([6, 5, 4, 2, 1]) @@ -26,3 +32,9 @@ def find_missing_number(nums: list[int]) -> int: missing_number ^= i ^ nums[i - low] return missing_number + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c38b222212de921295440b2b1236376136f37136 Mon Sep 17 00:00:00 2001 From: dekomori_sanae09 Date: Mon, 30 Oct 2023 04:37:21 +0530 Subject: [PATCH 278/306] serialize deserialize binary tree (#9625) * added serialize and desrialize bin tree * format files * added type hints * added type hints * Use dataclass .__eq__(), .__iter__(), and .__repr__() --------- Co-authored-by: Christian Clauss --- .../serialize_deserialize_binary_tree.py | 140 ++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 data_structures/binary_tree/serialize_deserialize_binary_tree.py diff --git a/data_structures/binary_tree/serialize_deserialize_binary_tree.py b/data_structures/binary_tree/serialize_deserialize_binary_tree.py new file mode 100644 index 000000000..7d3e0c61f --- /dev/null +++ b/data_structures/binary_tree/serialize_deserialize_binary_tree.py @@ -0,0 +1,140 @@ +from __future__ import annotations + +from collections.abc import Iterator +from dataclasses import dataclass + + +@dataclass +class TreeNode: + """ + A binary tree node has a value, left child, and right child. + + Props: + value: The value of the node. + left: The left child of the node. + right: The right child of the node. + """ + + value: int = 0 + left: TreeNode | None = None + right: TreeNode | None = None + + def __post_init__(self): + if not isinstance(self.value, int): + raise TypeError("Value must be an integer.") + + def __iter__(self) -> Iterator[TreeNode]: + """ + Iterate through the tree in preorder. + + Returns: + An iterator of the tree nodes. 
+ + >>> list(TreeNode(1)) + [1,null,null] + >>> tuple(TreeNode(1, TreeNode(2), TreeNode(3))) + (1,2,null,null,3,null,null, 2,null,null, 3,null,null) + """ + yield self + yield from self.left or () + yield from self.right or () + + def __len__(self) -> int: + """ + Count the number of nodes in the tree. + + Returns: + The number of nodes in the tree. + + >>> len(TreeNode(1)) + 1 + >>> len(TreeNode(1, TreeNode(2), TreeNode(3))) + 3 + """ + return sum(1 for _ in self) + + def __repr__(self) -> str: + """ + Represent the tree as a string. + + Returns: + A string representation of the tree. + + >>> repr(TreeNode(1)) + '1,null,null' + >>> repr(TreeNode(1, TreeNode(2), TreeNode(3))) + '1,2,null,null,3,null,null' + >>> repr(TreeNode(1, TreeNode(2), TreeNode(3, TreeNode(4), TreeNode(5)))) + '1,2,null,null,3,4,null,null,5,null,null' + """ + return f"{self.value},{self.left!r},{self.right!r}".replace("None", "null") + + @classmethod + def five_tree(cls) -> TreeNode: + """ + >>> repr(TreeNode.five_tree()) + '1,2,null,null,3,4,null,null,5,null,null' + """ + root = TreeNode(1) + root.left = TreeNode(2) + root.right = TreeNode(3) + root.right.left = TreeNode(4) + root.right.right = TreeNode(5) + return root + + +def deserialize(data: str) -> TreeNode | None: + """ + Deserialize a string to a binary tree. + + Args: + data(str): The serialized string. + + Returns: + The root of the binary tree. + + >>> root = TreeNode.five_tree() + >>> serialzed_data = repr(root) + >>> deserialized = deserialize(serialzed_data) + >>> root == deserialized + True + >>> root is deserialized # two separate trees + False + >>> root.right.right.value = 6 + >>> root == deserialized + False + >>> serialzed_data = repr(root) + >>> deserialized = deserialize(serialzed_data) + >>> root == deserialized + True + >>> deserialize("") + Traceback (most recent call last): + ... + ValueError: Data cannot be empty. 
+ """ + + if not data: + raise ValueError("Data cannot be empty.") + + # Split the serialized string by a comma to get node values + nodes = data.split(",") + + def build_tree() -> TreeNode | None: + # Get the next value from the list + value = nodes.pop(0) + + if value == "null": + return None + + node = TreeNode(int(value)) + node.left = build_tree() # Recursively build left subtree + node.right = build_tree() # Recursively build right subtree + return node + + return build_tree() + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From ad9948d5d4d65d1457b58d278e780a1b9470a715 Mon Sep 17 00:00:00 2001 From: Mohammad Esfandiyar Date: Mon, 30 Oct 2023 16:50:47 +0330 Subject: [PATCH 279/306] implementation of Gaussian Elimination pivoting as a numerical linear algebra algorithm (#10457) * Adding new implementation Adding my python implementation of Gaussian Elimination pivoting as a numerical linear algebra algorithm * Delete linear_algebra/src/GaussianEliminationpivoting.py * Adding new implementation Adding my python implementation of Gaussian Elimination pivoting as a numerical linear algebra algorithm * Delete linear_algebra/src/gaussianeliminationpivoting.py * Adding new implementation Adding my python implementation of Gaussian Elimination pivoting as a numerical linear algebra algorithm for the third time because the last two times had conflict with the rules in PR * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Delete linear_algebra/src/gaussianeliminationpivoting.py * Adding gaussianeliminationpivoting.py Adding my python implementation of Gaussian Elimination pivoting as a numerical linear algebra algorithm for the fourth time because the last three times had conflict with the rules in PR and bots * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py * [pre-commit.ci] auto fixes from 
pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py I changed a to matrix and coeff_matrix for better clarity * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussianeliminationpivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and rename gaussianeliminationpivoting.py to gaussian_elimination_pivoting.py renamed the file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update 
gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Delete linear_algebra/src/gaussian_elimination_pivoting.py * Add files via upload * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Delete linear_algebra/src/gaussian_elimination_pivoting/text.py * Add files via upload * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py Co-authored-by: Christian Clauss * Update gaussian_elimination_pivoting.py * 
[pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * Update gaussian_elimination_pivoting.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../gaussian_elimination_pivoting.py | 101 ++++++++++++++++++ .../gaussian_elimination_pivoting/matrix.txt | 4 + 2 files changed, 105 insertions(+) create mode 100644 linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py create mode 100644 linear_algebra/src/gaussian_elimination_pivoting/matrix.txt diff --git a/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py 
b/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py new file mode 100644 index 000000000..2a86350e9 --- /dev/null +++ b/linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py @@ -0,0 +1,101 @@ +import numpy as np + +matrix = np.array( + [ + [5.0, -5.0, -3.0, 4.0, -11.0], + [1.0, -4.0, 6.0, -4.0, -10.0], + [-2.0, -5.0, 4.0, -5.0, -12.0], + [-3.0, -3.0, 5.0, -5.0, 8.0], + ], + dtype=float, +) + + +def solve_linear_system(matrix: np.ndarray) -> np.ndarray: + """ + Solve a linear system of equations using Gaussian elimination with partial pivoting + + Args: + - matrix: Coefficient matrix with the last column representing the constants. + + Returns: + - Solution vector. + + Raises: + - ValueError: If the matrix is not correct (i.e., singular). + + https://courses.engr.illinois.edu/cs357/su2013/lect.htm Lecture 7 + + Example: + >>> A = np.array([[2, 1, -1], [-3, -1, 2], [-2, 1, 2]], dtype=float) + >>> B = np.array([8, -11, -3], dtype=float) + >>> solution = solve_linear_system(np.column_stack((A, B))) + >>> np.allclose(solution, np.array([2., 3., -1.])) + True + >>> solve_linear_system(np.array([[0, 0], [0, 0]], dtype=float)) + array([nan, nan]) + """ + ab = np.copy(matrix) + num_of_rows = ab.shape[0] + num_of_columns = ab.shape[1] - 1 + x_lst: list[float] = [] + + # Lead element search + for column_num in range(num_of_rows): + for i in range(column_num, num_of_columns): + if abs(ab[i][column_num]) > abs(ab[column_num][column_num]): + ab[[column_num, i]] = ab[[i, column_num]] + if ab[column_num, column_num] == 0.0: + raise ValueError("Matrix is not correct") + else: + pass + if column_num != 0: + for i in range(column_num, num_of_rows): + ab[i, :] -= ( + ab[i, column_num - 1] + / ab[column_num - 1, column_num - 1] + * ab[column_num - 1, :] + ) + + # Upper triangular matrix + for column_num in range(num_of_rows): + for i in range(column_num, num_of_columns): + if abs(ab[i][column_num]) > 
abs(ab[column_num][column_num]): + ab[[column_num, i]] = ab[[i, column_num]] + if ab[column_num, column_num] == 0.0: + raise ValueError("Matrix is not correct") + else: + pass + if column_num != 0: + for i in range(column_num, num_of_rows): + ab[i, :] -= ( + ab[i, column_num - 1] + / ab[column_num - 1, column_num - 1] + * ab[column_num - 1, :] + ) + + # Find x vector (Back Substitution) + for column_num in range(num_of_rows - 1, -1, -1): + x = ab[column_num, -1] / ab[column_num, column_num] + x_lst.insert(0, x) + for i in range(column_num - 1, -1, -1): + ab[i, -1] -= ab[i, column_num] * x + + # Return the solution vector + return np.asarray(x_lst) + + +if __name__ == "__main__": + from doctest import testmod + from pathlib import Path + + testmod() + file_path = Path(__file__).parent / "matrix.txt" + try: + matrix = np.loadtxt(file_path) + except FileNotFoundError: + print(f"Error: {file_path} not found. Using default matrix instead.") + + # Example usage: + print(f"Matrix:\n{matrix}") + print(f"{solve_linear_system(matrix) = }") diff --git a/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt b/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt new file mode 100644 index 000000000..dd895ad85 --- /dev/null +++ b/linear_algebra/src/gaussian_elimination_pivoting/matrix.txt @@ -0,0 +1,4 @@ +5.0 -5.0 -3.0 4.0 -11.0 +1.0 -4.0 6.0 -4.0 -10.0 +-2.0 -5.0 4.0 -5.0 -12.0 +-3.0 -3.0 5.0 -5.0 8.0 \ No newline at end of file From ddd4023fe66cd4a0605d4f7de5ae85680ac94167 Mon Sep 17 00:00:00 2001 From: Devashri Deulkar <95555641+Devadeut@users.noreply.github.com> Date: Mon, 30 Oct 2023 23:45:49 +0530 Subject: [PATCH 280/306] Happy number (new algorithm) (#10864) * Happy number (new algorithm) adding new algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/special_numbers/happy_number.py Co-authored-by: Christian Clauss * Update happy_number.py added new changes * Update happy_number.py * 
[pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update happy_number.py * Update happy_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update happy_number.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update happy_number.py added ValueError part in code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update happy_number.py modified and added raise Error code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update happy_number.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/special_numbers/happy_number.py | 48 +++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 maths/special_numbers/happy_number.py diff --git a/maths/special_numbers/happy_number.py b/maths/special_numbers/happy_number.py new file mode 100644 index 000000000..eac3167e3 --- /dev/null +++ b/maths/special_numbers/happy_number.py @@ -0,0 +1,48 @@ +def is_happy_number(number: int) -> bool: + """ + A happy number is a number which eventually reaches 1 when replaced by the sum of + the square of each digit. + + :param number: The number to check for happiness. + :return: True if the number is a happy number, False otherwise. + + >>> is_happy_number(19) + True + >>> is_happy_number(2) + False + >>> is_happy_number(23) + True + >>> is_happy_number(1) + True + >>> is_happy_number(0) + Traceback (most recent call last): + ... + ValueError: number=0 must be a positive integer + >>> is_happy_number(-19) + Traceback (most recent call last): + ... + ValueError: number=-19 must be a positive integer + >>> is_happy_number(19.1) + Traceback (most recent call last): + ... 
+ ValueError: number=19.1 must be a positive integer + >>> is_happy_number("happy") + Traceback (most recent call last): + ... + ValueError: number='happy' must be a positive integer + """ + if not isinstance(number, int) or number <= 0: + msg = f"{number=} must be a positive integer" + raise ValueError(msg) + + seen = set() + while number != 1 and number not in seen: + seen.add(number) + number = sum(int(digit) ** 2 for digit in str(number)) + return number == 1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 79a327fc07388a093e132d9df94723f24c162315 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 19:17:00 +0100 Subject: [PATCH 281/306] [pre-commit.ci] pre-commit autoupdate (#11106) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.1 → v0.1.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.1...v0.1.3) - [github.com/psf/black: 23.10.0 → 23.10.1](https://github.com/psf/black/compare/23.10.0...23.10.1) - [github.com/tox-dev/pyproject-fmt: 1.2.0 → 1.3.0](https://github.com/tox-dev/pyproject-fmt/compare/1.2.0...1.3.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- DIRECTORY.md | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e0b9922fa..784993e6b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.1 + rev: v0.1.3 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.10.0 + rev: 23.10.1 hooks: - id: black @@ 
-33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.2.0" + rev: "1.3.0" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 9b2c8ce73..ee4a521f7 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -10,6 +10,8 @@ * [All Subsequences](backtracking/all_subsequences.py) * [Coloring](backtracking/coloring.py) * [Combination Sum](backtracking/combination_sum.py) + * [Crossword Puzzle Solver](backtracking/crossword_puzzle_solver.py) + * [Generate Parentheses](backtracking/generate_parentheses.py) * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py) * [Knight Tour](backtracking/knight_tour.py) * [Match Word Pattern](backtracking/match_word_pattern.py) @@ -35,6 +37,7 @@ * [Count 1S Brian Kernighan Method](bit_manipulation/count_1s_brian_kernighan_method.py) * [Count Number Of One Bits](bit_manipulation/count_number_of_one_bits.py) * [Excess 3 Code](bit_manipulation/excess_3_code.py) + * [Find Previous Power Of Two](bit_manipulation/find_previous_power_of_two.py) * [Gray Code Sequence](bit_manipulation/gray_code_sequence.py) * [Highest Set Bit](bit_manipulation/highest_set_bit.py) * [Index Of Rightmost Set Bit](bit_manipulation/index_of_rightmost_set_bit.py) @@ -54,6 +57,8 @@ ## Boolean Algebra * [And Gate](boolean_algebra/and_gate.py) * [Imply Gate](boolean_algebra/imply_gate.py) + * [Karnaugh Map Simplification](boolean_algebra/karnaugh_map_simplification.py) + * [Multiplexer](boolean_algebra/multiplexer.py) * [Nand Gate](boolean_algebra/nand_gate.py) * [Nimply Gate](boolean_algebra/nimply_gate.py) * [Nor Gate](boolean_algebra/nor_gate.py) @@ -108,6 +113,7 @@ * [Rsa Cipher](ciphers/rsa_cipher.py) * [Rsa Factorization](ciphers/rsa_factorization.py) * [Rsa Key Generator](ciphers/rsa_key_generator.py) + * [Running Key Cipher](ciphers/running_key_cipher.py) * [Shuffled Shift Cipher](ciphers/shuffled_shift_cipher.py) * [Simple Keyword Cypher](ciphers/simple_keyword_cypher.py) * [Simple Substitution 
Cipher](ciphers/simple_substitution_cipher.py) @@ -150,6 +156,7 @@ * [Excel Title To Column](conversions/excel_title_to_column.py) * [Hex To Bin](conversions/hex_to_bin.py) * [Hexadecimal To Decimal](conversions/hexadecimal_to_decimal.py) + * [Ipv4 Conversion](conversions/ipv4_conversion.py) * [Length Conversion](conversions/length_conversion.py) * [Molecular Chemistry](conversions/molecular_chemistry.py) * [Octal To Binary](conversions/octal_to_binary.py) @@ -209,6 +216,7 @@ * [Red Black Tree](data_structures/binary_tree/red_black_tree.py) * [Segment Tree](data_structures/binary_tree/segment_tree.py) * [Segment Tree Other](data_structures/binary_tree/segment_tree_other.py) + * [Serialize Deserialize Binary Tree](data_structures/binary_tree/serialize_deserialize_binary_tree.py) * [Symmetric Tree](data_structures/binary_tree/symmetric_tree.py) * [Treap](data_structures/binary_tree/treap.py) * [Wavelet Tree](data_structures/binary_tree/wavelet_tree.py) @@ -410,6 +418,9 @@ * [Mandelbrot](fractals/mandelbrot.py) * [Sierpinski Triangle](fractals/sierpinski_triangle.py) +## Fuzzy Logic + * [Fuzzy Operations](fuzzy_logic/fuzzy_operations.py) + ## Genetic Algorithm * [Basic String](genetic_algorithm/basic_string.py) @@ -521,6 +532,8 @@ * [Lu Decomposition](linear_algebra/lu_decomposition.py) * Src * [Conjugate Gradient](linear_algebra/src/conjugate_gradient.py) + * Gaussian Elimination Pivoting + * [Gaussian Elimination Pivoting](linear_algebra/src/gaussian_elimination_pivoting/gaussian_elimination_pivoting.py) * [Lib](linear_algebra/src/lib.py) * [Polynom For Points](linear_algebra/src/polynom_for_points.py) * [Power Iteration](linear_algebra/src/power_iteration.py) @@ -618,12 +631,14 @@ * [Germain Primes](maths/germain_primes.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) + * [Integer Square Root](maths/integer_square_root.py) * [Interquartile Range](maths/interquartile_range.py) * [Is Int 
Palindrome](maths/is_int_palindrome.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) * [Joint Probability Distribution](maths/joint_probability_distribution.py) + * [Josephus Problem](maths/josephus_problem.py) * [Juggler Sequence](maths/juggler_sequence.py) * [Karatsuba](maths/karatsuba.py) * [Kth Lexicographic Permutation](maths/kth_lexicographic_permutation.py) @@ -646,6 +661,7 @@ * [Monte Carlo Dice](maths/monte_carlo_dice.py) * [Number Of Digits](maths/number_of_digits.py) * Numerical Analysis + * [Adams Bashforth](maths/numerical_analysis/adams_bashforth.py) * [Bisection](maths/numerical_analysis/bisection.py) * [Bisection 2](maths/numerical_analysis/bisection_2.py) * [Integration By Simpson Approx](maths/numerical_analysis/integration_by_simpson_approx.py) @@ -1223,6 +1239,7 @@ * [Anagrams](strings/anagrams.py) * [Autocomplete Using Trie](strings/autocomplete_using_trie.py) * [Barcode Validator](strings/barcode_validator.py) + * [Bitap String Match](strings/bitap_string_match.py) * [Boyer Moore Search](strings/boyer_moore_search.py) * [Camel Case To Snake Case](strings/camel_case_to_snake_case.py) * [Can String Be Rearranged As Palindrome](strings/can_string_be_rearranged_as_palindrome.py) From b072ba657f045a899ad133006d54ce5c9035c7f4 Mon Sep 17 00:00:00 2001 From: Akshar Goyal Date: Mon, 30 Oct 2023 20:00:48 -0400 Subject: [PATCH 282/306] Added tests for validate_sudoku_board.py (#11108) --- matrix/validate_sudoku_board.py | 60 +++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/matrix/validate_sudoku_board.py b/matrix/validate_sudoku_board.py index 0ee7b3df0..a7e08d169 100644 --- a/matrix/validate_sudoku_board.py +++ b/matrix/validate_sudoku_board.py @@ -54,6 +54,66 @@ def is_valid_sudoku_board(sudoku_board: list[list[str]]) -> bool: ... ,[".",".",".",".","8",".",".","7","9"] ... 
]) False + >>> is_valid_sudoku_board([ + ... ["1","2","3","4","5","6","7","8","9"] + ... ,["4","5","6","7","8","9","1","2","3"] + ... ,["7","8","9","1","2","3","4","5","6"] + ... ,[".",".",".",".",".",".",".",".","."] + ... ,[".",".",".",".",".",".",".",".","."] + ... ,[".",".",".",".",".",".",".",".","."] + ... ,[".",".",".",".",".",".",".",".","."] + ... ,[".",".",".",".",".",".",".",".","."] + ... ,[".",".",".",".",".",".",".",".","."] + ... ]) + True + >>> is_valid_sudoku_board([ + ... ["1","2","3",".",".",".",".",".","."] + ... ,["4","5","6",".",".",".",".",".","."] + ... ,["7","8","9",".",".",".",".",".","."] + ... ,[".",".",".","4","5","6",".",".","."] + ... ,[".",".",".","7","8","9",".",".","."] + ... ,[".",".",".","1","2","3",".",".","."] + ... ,[".",".",".",".",".",".","7","8","9"] + ... ,[".",".",".",".",".",".","1","2","3"] + ... ,[".",".",".",".",".",".","4","5","6"] + ... ]) + True + >>> is_valid_sudoku_board([ + ... ["1","2","3",".",".",".","5","6","4"] + ... ,["4","5","6",".",".",".","8","9","7"] + ... ,["7","8","9",".",".",".","2","3","1"] + ... ,[".",".",".","4","5","6",".",".","."] + ... ,[".",".",".","7","8","9",".",".","."] + ... ,[".",".",".","1","2","3",".",".","."] + ... ,["3","1","2",".",".",".","7","8","9"] + ... ,["6","4","5",".",".",".","1","2","3"] + ... ,["9","7","8",".",".",".","4","5","6"] + ... ]) + True + >>> is_valid_sudoku_board([ + ... ["1","2","3","4","5","6","7","8","9"] + ... ,["2",".",".",".",".",".",".",".","8"] + ... ,["3",".",".",".",".",".",".",".","7"] + ... ,["4",".",".",".",".",".",".",".","6"] + ... ,["5",".",".",".",".",".",".",".","5"] + ... ,["6",".",".",".",".",".",".",".","4"] + ... ,["7",".",".",".",".",".",".",".","3"] + ... ,["8",".",".",".",".",".",".",".","2"] + ... ,["9","8","7","6","5","4","3","2","1"] + ... ]) + False + >>> is_valid_sudoku_board([ + ... ["1","2","3","8","9","7","5","6","4"] + ... ,["4","5","6","2","3","1","8","9","7"] + ... ,["7","8","9","5","6","4","2","3","1"] + ... 
,["2","3","1","4","5","6","9","7","8"] + ... ,["5","6","4","7","8","9","3","1","2"] + ... ,["8","9","7","1","2","3","6","4","5"] + ... ,["3","1","2","6","4","5","7","8","9"] + ... ,["6","4","5","9","7","8","1","2","3"] + ... ,["9","7","8","3","1","2","4","5","6"] + ... ]) + True >>> is_valid_sudoku_board([["1", "2", "3", "4", "5", "6", "7", "8", "9"]]) Traceback (most recent call last): ... From 99f3a0e4c9b1a6d9ff5bba2adf65d90d55f2250a Mon Sep 17 00:00:00 2001 From: Arya Hariharan <84255987+Arya-Hari@users.noreply.github.com> Date: Tue, 31 Oct 2023 12:23:38 +0530 Subject: [PATCH 283/306] adding-docstrings (#11114) * adding-docstrings * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update intro_sort.py * Update intro_sort.py * Remove blank lines --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- sorts/intro_sort.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/sorts/intro_sort.py b/sorts/intro_sort.py index 908d28865..5a5741dc8 100644 --- a/sorts/intro_sort.py +++ b/sorts/intro_sort.py @@ -11,6 +11,18 @@ def insertion_sort(array: list, start: int = 0, end: int = 0) -> list: >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] >>> insertion_sort(array, 0, len(array)) [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] + >>> array = [21, 15, 11, 45, -2, -11, 46] + >>> insertion_sort(array, 0, len(array)) + [-11, -2, 11, 15, 21, 45, 46] + >>> array = [-2, 0, 89, 11, 48, 79, 12] + >>> insertion_sort(array, 0, len(array)) + [-2, 0, 11, 12, 48, 79, 89] + >>> array = ['a', 'z', 'd', 'p', 'v', 'l', 'o', 'o'] + >>> insertion_sort(array, 0, len(array)) + ['a', 'd', 'l', 'o', 'o', 'p', 'v', 'z'] + >>> array = [73.568, 73.56, -45.03, 1.7, 0, 89.45] + >>> insertion_sort(array, 0, len(array)) + [-45.03, 0, 1.7, 73.56, 73.568, 89.45] """ end = end or len(array) for i in range(start, 
end): @@ -47,6 +59,12 @@ def heap_sort(array: list) -> list: """ >>> heap_sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]) [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79] + >>> heap_sort([-2, -11, 0, 0, 0, 87, 45, -69, 78, 12, 10, 103, 89, 52]) + [-69, -11, -2, 0, 0, 0, 10, 12, 45, 52, 78, 87, 89, 103] + >>> heap_sort(['b', 'd', 'e', 'f', 'g', 'p', 'x', 'z', 'b', 's', 'e', 'u', 'v']) + ['b', 'b', 'd', 'e', 'e', 'f', 'g', 'p', 's', 'u', 'v', 'x', 'z'] + >>> heap_sort([6.2, -45.54, 8465.20, 758.56, -457.0, 0, 1, 2.879, 1.7, 11.7]) + [-457.0, -45.54, 0, 1, 1.7, 2.879, 6.2, 11.7, 758.56, 8465.2] """ n = len(array) @@ -91,6 +109,15 @@ def partition(array: list, low: int, high: int, pivot: int) -> int: >>> array = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12] >>> partition(array, 0, len(array), 12) 8 + >>> array = [21, 15, 11, 45, -2, -11, 46] + >>> partition(array, 0, len(array), 15) + 3 + >>> array = ['a', 'z', 'd', 'p', 'v', 'l', 'o', 'o'] + >>> partition(array, 0, len(array), 'p') + 5 + >>> array = [6.2, -45.54, 8465.20, 758.56, -457.0, 0, 1, 2.879, 1.7, 11.7] + >>> partition(array, 0, len(array), 2.879) + 6 """ i = low j = high From ebfdb127e76e76c122d3110155abf644474b9fa9 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sat, 4 Nov 2023 17:34:57 +0530 Subject: [PATCH 284/306] Added doctest to hash_map.py (#11105) * Added doctest to heap.py * Added doctest to hash_map.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update hash_map.py * Added doctest to hash_map.py * Added doctest to hash_map.py * Added doctest to detecting_english_programmatically.py * Update detecting_english_programmatically.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- data_structures/hashing/hash_map.py | 33 +++++++++++++++++++++++++++++ 1 file changed, 33 
insertions(+) diff --git a/data_structures/hashing/hash_map.py b/data_structures/hashing/hash_map.py index 1689e07af..6a6f8e54d 100644 --- a/data_structures/hashing/hash_map.py +++ b/data_structures/hashing/hash_map.py @@ -242,6 +242,25 @@ class HashMap(MutableMapping[KEY, VAL]): self._size_down() def __getitem__(self, key: KEY) -> VAL: + """ + Returns the item at the given key + + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm.__getitem__(1) + 10 + + >>> hm = HashMap(5) + >>> hm._add_item(10, -10) + >>> hm._add_item(20, -20) + >>> hm.__getitem__(20) + -20 + + >>> hm = HashMap(5) + >>> hm._add_item(-1, 10) + >>> hm.__getitem__(-1) + 10 + """ for ind in self._iterate_buckets(key): item = self._buckets[ind] if item is None: @@ -253,6 +272,20 @@ class HashMap(MutableMapping[KEY, VAL]): raise KeyError(key) def __len__(self) -> int: + """ + Returns the number of items present in hashmap + + >>> hm = HashMap(5) + >>> hm._add_item(1, 10) + >>> hm._add_item(2, 20) + >>> hm._add_item(3, 30) + >>> hm.__len__() + 3 + + >>> hm = HashMap(5) + >>> hm.__len__() + 0 + """ return self._len def __iter__(self) -> Iterator[KEY]: From 257cfbdf6e2a55d48727f533ef15295065e0057b Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 5 Nov 2023 13:46:00 +0530 Subject: [PATCH 285/306] Added doctest to decision_tree.py (#11143) * Added doctest to decision_tree.py * Update decision_tree.py * Update machine_learning/decision_tree.py * Update machine_learning/decision_tree.py * raise ValueError() * Update decision_tree.py --------- Co-authored-by: Christian Clauss --- machine_learning/decision_tree.py | 47 ++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index 7cd1b02c4..c67e09c7f 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -18,7 +18,7 @@ class DecisionTree: def 
mean_squared_error(self, labels, prediction): """ mean_squared_error: - @param labels: a one dimensional numpy array + @param labels: a one-dimensional numpy array @param prediction: a floating point value return value: mean_squared_error calculates the error if prediction is used to estimate the labels @@ -44,26 +44,47 @@ class DecisionTree: def train(self, x, y): """ train: - @param x: a one dimensional numpy array - @param y: a one dimensional numpy array. + @param x: a one-dimensional numpy array + @param y: a one-dimensional numpy array. The contents of y are the labels for the corresponding X values - train does not have a return value - """ + train() does not have a return value - """ - this section is to check that the inputs conform to our dimensionality + Examples: + 1. Try to train when x & y are of same length & 1 dimensions (No errors) + >>> dt = DecisionTree() + >>> dt.train(np.array([10,20,30,40,50]),np.array([0,0,0,1,1])) + + 2. Try to train when x is 2 dimensions + >>> dt = DecisionTree() + >>> dt.train(np.array([[1,2,3,4,5],[1,2,3,4,5]]),np.array([0,0,0,1,1])) + Traceback (most recent call last): + ... + ValueError: Input data set must be one-dimensional + + 3. Try to train when x and y are not of the same length + >>> dt = DecisionTree() + >>> dt.train(np.array([1,2,3,4,5]),np.array([[0,0,0,1,1],[0,0,0,1,1]])) + Traceback (most recent call last): + ... + ValueError: x and y have different lengths + + 4. Try to train when x & y are of the same length but different dimensions + >>> dt = DecisionTree() + >>> dt.train(np.array([1,2,3,4,5]),np.array([[1],[2],[3],[4],[5]])) + Traceback (most recent call last): + ... 
+ ValueError: Data set labels must be one-dimensional + + This section is to check that the inputs conform to our dimensionality constraints """ if x.ndim != 1: - print("Error: Input data set must be one dimensional") - return + raise ValueError("Input data set must be one-dimensional") if len(x) != len(y): - print("Error: X and y have different lengths") - return + raise ValueError("x and y have different lengths") if y.ndim != 1: - print("Error: Data set labels must be one dimensional") - return + raise ValueError("Data set labels must be one-dimensional") if len(x) < 2 * self.min_leaf_size: self.prediction = np.mean(y) From 1e50cf366022a5c44abfa5adf5e01bef62524cc3 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 5 Nov 2023 14:08:39 +0530 Subject: [PATCH 286/306] Added doctest to binary_search_tree.py (#11141) * Added doctest to binary_search_tree.py * Update binary_search_tree.py * Update binary_search_tree.py --------- Co-authored-by: Christian Clauss --- .../binary_tree/binary_search_tree.py | 32 +++++++++++++++---- 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/data_structures/binary_tree/binary_search_tree.py b/data_structures/binary_tree/binary_search_tree.py index 38691c475..f08f278a8 100644 --- a/data_structures/binary_tree/binary_search_tree.py +++ b/data_structures/binary_tree/binary_search_tree.py @@ -10,8 +10,7 @@ Example / \ / 4 7 13 ->>> t = BinarySearchTree() ->>> t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7) +>>> t = BinarySearchTree().insert(8, 3, 6, 1, 10, 14, 13, 4, 7) >>> print(" ".join(repr(i.value) for i in t.traversal_tree())) 8 3 1 6 4 7 10 14 13 @@ -40,7 +39,16 @@ Other example: >>> testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7) >>> t = BinarySearchTree() >>> for i in testlist: -... t.insert(i) +... 
t.insert(i) # doctest: +ELLIPSIS +BinarySearchTree(root=8) +BinarySearchTree(root={'8': (3, None)}) +BinarySearchTree(root={'8': ({'3': (None, 6)}, None)}) +BinarySearchTree(root={'8': ({'3': (1, 6)}, None)}) +BinarySearchTree(root={'8': ({'3': (1, 6)}, 10)}) +BinarySearchTree(root={'8': ({'3': (1, 6)}, {'10': (None, 14)})}) +BinarySearchTree(root={'8': ({'3': (1, 6)}, {'10': (None, {'14': (13, None)})})}) +BinarySearchTree(root={'8': ({'3': (1, {'6': (4, None)})}, {'10': (None, {'14': ... +BinarySearchTree(root={'8': ({'3': (1, {'6': (4, 7)})}, {'10': (None, {'14': (13, ... Prints all the elements of the list in order traversal >>> print(t) @@ -84,7 +92,7 @@ from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass -from typing import Any +from typing import Any, Self @dataclass @@ -145,7 +153,18 @@ class BinarySearchTree: self.root = new_children def empty(self) -> bool: - return self.root is None + """ + Returns True if the tree does not have any element(s). + False if the tree has element(s). 
+ + >>> BinarySearchTree().empty() + True + >>> BinarySearchTree().insert(1).empty() + False + >>> BinarySearchTree().insert(8, 3, 6, 1, 10, 14, 13, 4, 7).empty() + False + """ + return not self.root def __insert(self, value) -> None: """ @@ -173,9 +192,10 @@ class BinarySearchTree: parent_node = parent_node.right new_node.parent = parent_node - def insert(self, *values) -> None: + def insert(self, *values) -> Self: for value in values: self.__insert(value) + return self def search(self, value) -> Node | None: if self.empty(): From e48ea7d39643f3c15f830ccf63a363378858a001 Mon Sep 17 00:00:00 2001 From: SEIKH NABAB UDDIN <93948993+nababuddin@users.noreply.github.com> Date: Sun, 5 Nov 2023 14:13:52 +0530 Subject: [PATCH 287/306] Create get_ip_geolocation.py (#10902) * Create get_ip_geolocation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update get_ip_geolocation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update get_ip_geolocation.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- web_programming/get_ip_geolocation.py | 40 +++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 web_programming/get_ip_geolocation.py diff --git a/web_programming/get_ip_geolocation.py b/web_programming/get_ip_geolocation.py new file mode 100644 index 000000000..62eaeafce --- /dev/null +++ b/web_programming/get_ip_geolocation.py @@ -0,0 +1,40 @@ +import requests + + +# Function to get geolocation data for an IP address +def get_ip_geolocation(ip_address: str) -> str: + try: + # Construct the URL for the IP geolocation API + url = f"https://ipinfo.io/{ip_address}/json" + + # Send a GET request to the API + response = requests.get(url) + + # Check if the HTTP request was successful + 
response.raise_for_status() + + # Parse the response as JSON + data = response.json() + + # Check if city, region, and country information is available + if "city" in data and "region" in data and "country" in data: + location = f"Location: {data['city']}, {data['region']}, {data['country']}" + else: + location = "Location data not found." + + return location + except requests.exceptions.RequestException as e: + # Handle network-related exceptions + return f"Request error: {e}" + except ValueError as e: + # Handle JSON parsing errors + return f"JSON parsing error: {e}" + + +if __name__ == "__main__": + # Prompt the user to enter an IP address + ip_address = input("Enter an IP address: ") + + # Get the geolocation data and print it + location = get_ip_geolocation(ip_address) + print(location) From eb989c08cdbf82e1a4db6481371f3e9ccb3bcf99 Mon Sep 17 00:00:00 2001 From: Sunny Kumar <37464973+Skyad@users.noreply.github.com> Date: Mon, 6 Nov 2023 17:40:50 +0530 Subject: [PATCH 288/306] Data structures/arrays/triplet sum (#11134) * updated code for find triplets with 0 sum Signed-off-by: Skyad <777.sunnykumar@gmail.com> * extra line added at the end of file Signed-off-by: Sunny Kumar * extra line added at the end of file Signed-off-by: Skyad <777.sunnykumar@gmail.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * file updated with comments Signed-off-by: Skyad <777.sunnykumar@gmail.com> * updated the comments as suggested by community Signed-off-by: Sunny Kumar * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * file updated according to community comments Signed-off-by: Skyad <777.sunnykumar@gmail.com> * Update find_triplets_with_0_sum.py --------- Signed-off-by: Skyad <777.sunnykumar@gmail.com> Signed-off-by: Sunny Kumar Co-authored-by: Sunny Kumar Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: 
Christian Clauss --- .../arrays/find_triplets_with_0_sum.py | 63 +++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/data_structures/arrays/find_triplets_with_0_sum.py b/data_structures/arrays/find_triplets_with_0_sum.py index 8217ff857..52e521906 100644 --- a/data_structures/arrays/find_triplets_with_0_sum.py +++ b/data_structures/arrays/find_triplets_with_0_sum.py @@ -22,3 +22,66 @@ def find_triplets_with_0_sum(nums: list[int]) -> list[list[int]]: list(x) for x in sorted({abc for abc in combinations(sorted(nums), 3) if not sum(abc)}) ] + + +def find_triplets_with_0_sum_hashing(arr: list[int]) -> list[list[int]]: + """ + Function for finding the triplets with a given sum in the array using hashing. + + Given a list of integers, return elements a, b, c such that a + b + c = 0. + + Args: + nums: list of integers + Returns: + list of lists of integers where sum(each_list) == 0 + Examples: + >>> find_triplets_with_0_sum_hashing([-1, 0, 1, 2, -1, -4]) + [[-1, 0, 1], [-1, -1, 2]] + >>> find_triplets_with_0_sum_hashing([]) + [] + >>> find_triplets_with_0_sum_hashing([0, 0, 0]) + [[0, 0, 0]] + >>> find_triplets_with_0_sum_hashing([1, 2, 3, 0, -1, -2, -3]) + [[-1, 0, 1], [-3, 1, 2], [-2, 0, 2], [-2, -1, 3], [-3, 0, 3]] + + Time complexity: O(N^2) + Auxiliary Space: O(N) + + """ + target_sum = 0 + + # Initialize the final output array with blank. + output_arr = [] + + # Set the initial element as arr[i]. + for index, item in enumerate(arr[:-2]): + # to store second elements that can complement the final sum. + set_initialize = set() + + # current sum needed for reaching the target sum + current_sum = target_sum - item + + # Traverse the subarray arr[i+1:]. + for other_item in arr[index + 1 :]: + # required value for the second element + required_value = current_sum - other_item + + # Verify if the desired value exists in the set. + if required_value in set_initialize: + # finding triplet elements combination. 
+ combination_array = sorted([item, other_item, required_value]) + if combination_array not in output_arr: + output_arr.append(combination_array) + + # Include the current element in the set + # for subsequent complement verification. + set_initialize.add(other_item) + + # Return all the triplet combinations. + return output_arr + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From fa508d7b8bf9696805e97deac71e657256500ab7 Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Mon, 6 Nov 2023 17:44:39 +0530 Subject: [PATCH 289/306] Added doctest to detecting_english_programmatically.py (#11135) --- strings/detecting_english_programmatically.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/strings/detecting_english_programmatically.py b/strings/detecting_english_programmatically.py index b9000101b..e30e2ea8d 100644 --- a/strings/detecting_english_programmatically.py +++ b/strings/detecting_english_programmatically.py @@ -25,6 +25,18 @@ def get_english_count(message: str) -> float: def remove_non_letters(message: str) -> str: + """ + >>> remove_non_letters("Hi! 
how are you?") + 'Hi how are you' + >>> remove_non_letters("P^y%t)h@o*n") + 'Python' + >>> remove_non_letters("1+1=2") + '' + >>> remove_non_letters("www.google.com/") + 'wwwgooglecom' + >>> remove_non_letters("") + '' + """ return "".join(symbol for symbol in message if symbol in LETTERS_AND_SPACE) From 12e401650c8afd4b6cf69ddab09a882d1eb6ff5c Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Mon, 6 Nov 2023 17:48:41 +0530 Subject: [PATCH 290/306] Added doctest to string_switch_case.py (#11136) * Added doctest to string_switch_case.py * Update string_switch_case.py --- strings/string_switch_case.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/strings/string_switch_case.py b/strings/string_switch_case.py index 9a07472df..c16d9fa55 100644 --- a/strings/string_switch_case.py +++ b/strings/string_switch_case.py @@ -28,6 +28,12 @@ def to_simple_case(str_: str) -> str: """ >>> to_simple_case("one two 31235three4four") 'OneTwo31235three4four' + >>> to_simple_case("This should be combined") + 'ThisShouldBeCombined' + >>> to_simple_case("The first letters are capitalized, then string is merged") + 'TheFirstLettersAreCapitalizedThenStringIsMerged' + >>> to_simple_case("special characters :, ', %, ^, $, are ignored") + 'SpecialCharactersAreIgnored' """ string_split = split_input(str_) return "".join( @@ -37,6 +43,14 @@ def to_simple_case(str_: str) -> str: def to_complex_case(text: str, upper: bool, separator: str) -> str: """ + Returns the string concatenated with the delimiter we provide. 
+ + Parameters: + @text: The string on which we want to perform operation + @upper: Boolean value to determine whether we want capitalized result or not + @separator: The delimiter with which we want to concatenate words + + Examples: >>> to_complex_case("one two 31235three4four", True, "_") 'ONE_TWO_31235THREE4FOUR' >>> to_complex_case("one two 31235three4four", False, "-") From a13e9c21374caf40652ee75cc3620f3ac0c72ff3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 06:49:09 +0600 Subject: [PATCH 291/306] [pre-commit.ci] pre-commit autoupdate (#11146) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.3 → v0.1.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.3...v0.1.4) - [github.com/tox-dev/pyproject-fmt: 1.3.0 → 1.4.1](https://github.com/tox-dev/pyproject-fmt/compare/1.3.0...1.4.1) * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 2 ++ pyproject.toml | 8 ++++---- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 784993e6b..1bb3de782 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.3 + rev: v0.1.4 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "1.3.0" + rev: "1.4.1" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index ee4a521f7..cb4b00b04 100644 --- a/DIRECTORY.md +++ 
b/DIRECTORY.md @@ -725,6 +725,7 @@ * [Carmichael Number](maths/special_numbers/carmichael_number.py) * [Catalan Number](maths/special_numbers/catalan_number.py) * [Hamming Numbers](maths/special_numbers/hamming_numbers.py) + * [Happy Number](maths/special_numbers/happy_number.py) * [Harshad Numbers](maths/special_numbers/harshad_numbers.py) * [Hexagonal Number](maths/special_numbers/hexagonal_number.py) * [Krishnamurthy Number](maths/special_numbers/krishnamurthy_number.py) @@ -1310,6 +1311,7 @@ * [Get Amazon Product Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) + * [Get Ip Geolocation](web_programming/get_ip_geolocation.py) * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) * [Get User Tweets](web_programming/get_user_tweets.py) diff --git a/pyproject.toml b/pyproject.toml index 5d27142d1..c7163dc78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -117,6 +117,10 @@ max-branches = 20 # default: 12 max-returns = 8 # default: 6 max-statements = 88 # default: 50 +[tool.codespell] +ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" +skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" + [tool.pytest.ini_options] markers = [ "mat_ops: mark a test as utilizing matrix operations.", @@ -133,7 +137,3 @@ omit = [ "project_euler/*" ] sort = "Cover" - -[tool.codespell] -ignore-words-list = "3rt,ans,bitap,crate,damon,fo,followings,hist,iff,kwanza,manuel,mater,secant,som,sur,tim,toi,zar" -skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" From 8b7352626e54b619113b771a7e9586aabe603fa7 Mon Sep 17 00:00:00 2001 From: Suyash 
Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 12 Nov 2023 07:43:04 +0530 Subject: [PATCH 292/306] Added doctest to randomized_heap.py (#11151) --- data_structures/heap/randomized_heap.py | 30 +++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/data_structures/heap/randomized_heap.py b/data_structures/heap/randomized_heap.py index c0f9888f8..12888c1f4 100644 --- a/data_structures/heap/randomized_heap.py +++ b/data_structures/heap/randomized_heap.py @@ -22,14 +22,40 @@ class RandomizedHeapNode(Generic[T]): @property def value(self) -> T: - """Return the value of the node.""" + """ + Return the value of the node. + + >>> rhn = RandomizedHeapNode(10) + >>> rhn.value + 10 + >>> rhn = RandomizedHeapNode(-10) + >>> rhn.value + -10 + """ return self._value @staticmethod def merge( root1: RandomizedHeapNode[T] | None, root2: RandomizedHeapNode[T] | None ) -> RandomizedHeapNode[T] | None: - """Merge 2 nodes together.""" + """ + Merge 2 nodes together. 
+ + >>> rhn1 = RandomizedHeapNode(10) + >>> rhn2 = RandomizedHeapNode(20) + >>> RandomizedHeapNode.merge(rhn1, rhn2).value + 10 + + >>> rhn1 = RandomizedHeapNode(20) + >>> rhn2 = RandomizedHeapNode(10) + >>> RandomizedHeapNode.merge(rhn1, rhn2).value + 10 + + >>> rhn1 = RandomizedHeapNode(5) + >>> rhn2 = RandomizedHeapNode(0) + >>> RandomizedHeapNode.merge(rhn1, rhn2).value + 0 + """ if not root1: return root2 From fb17eeab7d1fbc608a538b6d154d2c08781e087d Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Sun, 12 Nov 2023 07:46:43 +0530 Subject: [PATCH 293/306] Added doctest to stack.py (#11149) --- data_structures/stacks/stack.py | 92 +++++++++++++++++++++++++++++++-- 1 file changed, 87 insertions(+), 5 deletions(-) diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index a14f4648a..93698f5aa 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -33,7 +33,23 @@ class Stack(Generic[T]): return str(self.stack) def push(self, data: T) -> None: - """Push an element to the top of the stack.""" + """ + Push an element to the top of the stack. + + >>> S = Stack(2) # stack size = 2 + >>> S.push(10) + >>> S.push(20) + >>> print(S) + [10, 20] + + >>> S = Stack(1) # stack size = 1 + >>> S.push(10) + >>> S.push(20) + Traceback (most recent call last): + ... + data_structures.stacks.stack.StackOverflowError + + """ if len(self.stack) >= self.limit: raise StackOverflowError self.stack.append(data) @@ -42,6 +58,12 @@ class Stack(Generic[T]): """ Pop an element off of the top of the stack. + >>> S = Stack() + >>> S.push(-5) + >>> S.push(10) + >>> S.pop() + 10 + >>> Stack().pop() Traceback (most recent call last): ... @@ -55,7 +77,13 @@ class Stack(Generic[T]): """ Peek at the top-most element of the stack. - >>> Stack().pop() + >>> S = Stack() + >>> S.push(-5) + >>> S.push(10) + >>> S.peek() + 10 + + >>> Stack().peek() Traceback (most recent call last): ... 
data_structures.stacks.stack.StackUnderflowError @@ -65,18 +93,68 @@ class Stack(Generic[T]): return self.stack[-1] def is_empty(self) -> bool: - """Check if a stack is empty.""" + """ + Check if a stack is empty. + + >>> S = Stack() + >>> S.is_empty() + True + + >>> S = Stack() + >>> S.push(10) + >>> S.is_empty() + False + """ return not bool(self.stack) def is_full(self) -> bool: + """ + >>> S = Stack() + >>> S.is_full() + False + + >>> S = Stack(1) + >>> S.push(10) + >>> S.is_full() + True + """ return self.size() == self.limit def size(self) -> int: - """Return the size of the stack.""" + """ + Return the size of the stack. + + >>> S = Stack(3) + >>> S.size() + 0 + + >>> S = Stack(3) + >>> S.push(10) + >>> S.size() + 1 + + >>> S = Stack(3) + >>> S.push(10) + >>> S.push(20) + >>> S.size() + 2 + """ return len(self.stack) def __contains__(self, item: T) -> bool: - """Check if item is in stack""" + """ + Check if item is in stack + + >>> S = Stack(3) + >>> S.push(10) + >>> 10 in S + True + + >>> S = Stack(3) + >>> S.push(10) + >>> 20 in S + False + """ return item in self.stack @@ -131,3 +209,7 @@ def test_stack() -> None: if __name__ == "__main__": test_stack() + + import doctest + + doctest.testmod() From 0e2e6abd6f24d0d816212ff0480a18abecd3028b Mon Sep 17 00:00:00 2001 From: Suyash Dongre <109069262+Suyashd999@users.noreply.github.com> Date: Mon, 13 Nov 2023 16:35:22 +0530 Subject: [PATCH 294/306] Added doctest to heap.py (#11129) * Added doctest to heap.py * Update heap.py --- data_structures/heap/heap.py | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index 29bff3af0..7b15e69f1 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -53,7 +53,37 @@ class Heap(Generic[T]): return str(self.h) def parent_index(self, child_idx: int) -> int | None: - """return the parent index of given child""" + """ + returns the parent index 
based on the given child index + + >>> h = Heap() + >>> h.build_max_heap([103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5]) + >>> h + [209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5] + + >>> h.parent_index(-1) # returns none if index is <=0 + + >>> h.parent_index(0) # returns none if index is <=0 + + >>> h.parent_index(1) + 0 + >>> h.parent_index(2) + 0 + >>> h.parent_index(3) + 1 + >>> h.parent_index(4) + 1 + >>> h.parent_index(5) + 2 + >>> h.parent_index(10.5) + 4.0 + >>> h.parent_index(209.0) + 104.0 + >>> h.parent_index("Test") + Traceback (most recent call last): + ... + TypeError: '>' not supported between instances of 'str' and 'int' + """ if child_idx > 0: return (child_idx - 1) // 2 return None From 5f61af4fbbab33704b4aebd6523c64f8e6360869 Mon Sep 17 00:00:00 2001 From: MC <129918860+FishyGitHubUser@users.noreply.github.com> Date: Thu, 16 Nov 2023 19:00:48 +0800 Subject: [PATCH 295/306] Fix ignore venv in build_directory_md.py (#11156) Co-authored-by: MICHAEL CASTLE --- scripts/build_directory_md.py | 6 +++++- web_programming/{get_imdbtop.py => get_imdbtop.py.DISABLED} | 0 2 files changed, 5 insertions(+), 1 deletion(-) rename web_programming/{get_imdbtop.py => get_imdbtop.py.DISABLED} (100%) diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py index 24bc00cd0..aa95b95db 100755 --- a/scripts/build_directory_md.py +++ b/scripts/build_directory_md.py @@ -6,7 +6,11 @@ from collections.abc import Iterator def good_file_paths(top_dir: str = ".") -> Iterator[str]: for dir_path, dir_names, filenames in os.walk(top_dir): - dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"] + dir_names[:] = [ + d + for d in dir_names + if d != "scripts" and d[0] not in "._" and "venv" not in d + ] for filename in filenames: if filename == "__init__.py": continue diff --git a/web_programming/get_imdbtop.py b/web_programming/get_imdbtop.py.DISABLED similarity index 100% rename from web_programming/get_imdbtop.py rename to 
web_programming/get_imdbtop.py.DISABLED From 3999abfea392209fcb67c2218774a229878cf4cb Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Fri, 24 Nov 2023 20:00:21 +0200 Subject: [PATCH 296/306] adding a geometry module (#11138) * adding a geometry module * fixing errors and adding type hints * Create code_review_feedback.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * implementing suggestions * fixing ruff errors * Update geometry/code_review_feedback.py * Update geometry/code_review_feedback.py * Update geometry/geometry.py * Apply suggestions from code review * Delete geometry/code_review_feedback.py * Update geometry/geometry.py * Update geometry/geometry.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- geometry/geometry.py | 259 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 259 insertions(+) create mode 100644 geometry/geometry.py diff --git a/geometry/geometry.py b/geometry/geometry.py new file mode 100644 index 000000000..9e353dee1 --- /dev/null +++ b/geometry/geometry.py @@ -0,0 +1,259 @@ +from __future__ import annotations + +import math +from dataclasses import dataclass, field +from types import NoneType +from typing import Self + +# Building block classes + + +@dataclass +class Angle: + """ + An Angle in degrees (unit of measurement) + + >>> Angle() + Angle(degrees=90) + >>> Angle(45.5) + Angle(degrees=45.5) + >>> Angle(-1) + Traceback (most recent call last): + ... + TypeError: degrees must be a numeric value between 0 and 360. + >>> Angle(361) + Traceback (most recent call last): + ... + TypeError: degrees must be a numeric value between 0 and 360. 
+ """ + + degrees: float = 90 + + def __post_init__(self) -> None: + if not isinstance(self.degrees, (int, float)) or not 0 <= self.degrees <= 360: + raise TypeError("degrees must be a numeric value between 0 and 360.") + + +@dataclass +class Side: + """ + A side of a two dimensional Shape such as Polygon, etc. + adjacent_sides: a list of sides which are adjacent to the current side + angle: the angle in degrees between each adjacent side + length: the length of the current side in meters + + >>> Side(5) + Side(length=5, angle=Angle(degrees=90), next_side=None) + >>> Side(5, Angle(45.6)) + Side(length=5, angle=Angle(degrees=45.6), next_side=None) + >>> Side(5, Angle(45.6), Side(1, Angle(2))) # doctest: +ELLIPSIS + Side(length=5, angle=Angle(degrees=45.6), next_side=Side(length=1, angle=Angle(d... + """ + + length: float + angle: Angle = field(default_factory=Angle) + next_side: Side | None = None + + def __post_init__(self) -> None: + if not isinstance(self.length, (int, float)) or self.length <= 0: + raise TypeError("length must be a positive numeric value.") + if not isinstance(self.angle, Angle): + raise TypeError("angle must be an Angle object.") + if not isinstance(self.next_side, (Side, NoneType)): + raise TypeError("next_side must be a Side or None.") + + +@dataclass +class Ellipse: + """ + A geometric Ellipse on a 2D surface + + >>> Ellipse(5, 10) + Ellipse(major_radius=5, minor_radius=10) + >>> Ellipse(5, 10) is Ellipse(5, 10) + False + >>> Ellipse(5, 10) == Ellipse(5, 10) + True + """ + + major_radius: float + minor_radius: float + + @property + def area(self) -> float: + """ + >>> Ellipse(5, 10).area + 157.07963267948966 + """ + return math.pi * self.major_radius * self.minor_radius + + @property + def perimeter(self) -> float: + """ + >>> Ellipse(5, 10).perimeter + 47.12388980384689 + """ + return math.pi * (self.major_radius + self.minor_radius) + + +class Circle(Ellipse): + """ + A geometric Circle on a 2D surface + + >>> Circle(5) + Circle(radius=5) 
+ >>> Circle(5) is Circle(5) + False + >>> Circle(5) == Circle(5) + True + >>> Circle(5).area + 78.53981633974483 + >>> Circle(5).perimeter + 31.41592653589793 + """ + + def __init__(self, radius: float) -> None: + super().__init__(radius, radius) + self.radius = radius + + def __repr__(self) -> str: + return f"Circle(radius={self.radius})" + + @property + def diameter(self) -> float: + """ + >>> Circle(5).diameter + 10 + """ + return self.radius * 2 + + def max_parts(self, num_cuts: float) -> float: + """ + Return the maximum number of parts that circle can be divided into if cut + 'num_cuts' times. + + >>> circle = Circle(5) + >>> circle.max_parts(0) + 1.0 + >>> circle.max_parts(7) + 29.0 + >>> circle.max_parts(54) + 1486.0 + >>> circle.max_parts(22.5) + 265.375 + >>> circle.max_parts(-222) + Traceback (most recent call last): + ... + TypeError: num_cuts must be a positive numeric value. + >>> circle.max_parts("-222") + Traceback (most recent call last): + ... + TypeError: num_cuts must be a positive numeric value. + """ + if not isinstance(num_cuts, (int, float)) or num_cuts < 0: + raise TypeError("num_cuts must be a positive numeric value.") + return (num_cuts + 2 + num_cuts**2) * 0.5 + + +@dataclass +class Polygon: + """ + An abstract class which represents Polygon on a 2D surface. + + >>> Polygon() + Polygon(sides=[]) + """ + + sides: list[Side] = field(default_factory=list) + + def add_side(self, side: Side) -> Self: + """ + >>> Polygon().add_side(Side(5)) + Polygon(sides=[Side(length=5, angle=Angle(degrees=90), next_side=None)]) + """ + self.sides.append(side) + return self + + def get_side(self, index: int) -> Side: + """ + >>> Polygon().get_side(0) + Traceback (most recent call last): + ... 
+ IndexError: list index out of range + >>> Polygon().add_side(Side(5)).get_side(-1) + Side(length=5, angle=Angle(degrees=90), next_side=None) + """ + return self.sides[index] + + def set_side(self, index: int, side: Side) -> Self: + """ + >>> Polygon().set_side(0, Side(5)) + Traceback (most recent call last): + ... + IndexError: list assignment index out of range + >>> Polygon().add_side(Side(5)).set_side(0, Side(10)) + Polygon(sides=[Side(length=10, angle=Angle(degrees=90), next_side=None)]) + """ + self.sides[index] = side + return self + + +class Rectangle(Polygon): + """ + A geometric rectangle on a 2D surface. + + >>> rectangle_one = Rectangle(5, 10) + >>> rectangle_one.perimeter() + 30 + >>> rectangle_one.area() + 50 + """ + + def __init__(self, short_side_length: float, long_side_length: float) -> None: + super().__init__() + self.short_side_length = short_side_length + self.long_side_length = long_side_length + self.post_init() + + def post_init(self) -> None: + """ + >>> Rectangle(5, 10) # doctest: +NORMALIZE_WHITESPACE + Rectangle(sides=[Side(length=5, angle=Angle(degrees=90), next_side=None), + Side(length=10, angle=Angle(degrees=90), next_side=None)]) + """ + self.short_side = Side(self.short_side_length) + self.long_side = Side(self.long_side_length) + super().add_side(self.short_side) + super().add_side(self.long_side) + + def perimeter(self) -> float: + return (self.short_side.length + self.long_side.length) * 2 + + def area(self) -> float: + return self.short_side.length * self.long_side.length + + +@dataclass +class Square(Rectangle): + """ + a structure which represents a + geometrical square on a 2D surface + >>> square_one = Square(5) + >>> square_one.perimeter() + 20 + >>> square_one.area() + 25 + """ + + def __init__(self, side_length: float) -> None: + super().__init__(side_length, side_length) + + def perimeter(self) -> float: + return super().perimeter() + + def area(self) -> float: + return super().area() + + +if __name__ == "__main__": + 
__import__("doctest").testmod() From b8e7a4c76c4a4929ac2c7e784b0c151be47c1e6e Mon Sep 17 00:00:00 2001 From: MC <129918860+FishyGitHubUser@users.noreply.github.com> Date: Sat, 25 Nov 2023 19:17:59 +0800 Subject: [PATCH 297/306] Fix typo in knight_tour.py (#11173) --- backtracking/knight_tour.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index cc88307b7..5f7dee8d9 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -79,7 +79,7 @@ def open_knight_tour(n: int) -> list[list[int]]: >>> open_knight_tour(2) Traceback (most recent call last): ... - ValueError: Open Kight Tour cannot be performed on a board of size 2 + ValueError: Open Knight Tour cannot be performed on a board of size 2 """ board = [[0 for i in range(n)] for j in range(n)] @@ -91,7 +91,7 @@ def open_knight_tour(n: int) -> list[list[int]]: return board board[i][j] = 0 - msg = f"Open Kight Tour cannot be performed on a board of size {n}" + msg = f"Open Knight Tour cannot be performed on a board of size {n}" raise ValueError(msg) From 5898b9603bbe9b449cf5a2e331cf0c7d3245a788 Mon Sep 17 00:00:00 2001 From: Rahid Zeynalov <44039543+rahidzeynal@users.noreply.github.com> Date: Sat, 25 Nov 2023 15:25:46 +0400 Subject: [PATCH 298/306] Typo deicmal -> decimal (#11169) --- bit_manipulation/is_even.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bit_manipulation/is_even.py b/bit_manipulation/is_even.py index ba036f35a..6f95a1160 100644 --- a/bit_manipulation/is_even.py +++ b/bit_manipulation/is_even.py @@ -1,7 +1,7 @@ def is_even(number: int) -> bool: """ return true if the input integer is even - Explanation: Lets take a look at the following deicmal to binary conversions + Explanation: Lets take a look at the following decimal to binary conversions 2 => 10 14 => 1110 100 => 1100100 From 4151a13b57fbd881d3fce3bb61101fe58ad541ae Mon Sep 17 00:00:00 2001 From: Clark <1009013283@qq.com> 
"""
Use an ant colony optimization algorithm to solve the travelling salesman problem (TSP)
which asks the following question:
"Given a list of cities and the distances between each pair of cities, what is the
 shortest possible route that visits each city exactly once and returns to the origin
 city?"

https://en.wikipedia.org/wiki/Ant_colony_optimization_algorithms
https://en.wikipedia.org/wiki/Travelling_salesman_problem

Author: Clark
"""

import random

# Demo instance: city id -> [x, y] coordinates.
cities = {
    0: [0, 0],
    1: [0, 5],
    2: [3, 8],
    3: [8, 10],
    4: [12, 8],
    5: [12, 4],
    6: [8, 0],
    7: [6, 2],
}


def main(
    cities: dict[int, list[int]],
    ants_num: int,
    iterations_num: int,
    pheromone_evaporation: float,
    alpha: float,
    beta: float,
    q: float,  # Pheromone system parameter Q, which is a constant
) -> tuple[list[int], float]:
    """
    Ant colony algorithm main function.

    The tour found for the full demo instance is stochastic (``random.choices``
    is unseeded), so only structural properties are doctested for it; the
    two-city cases are fully deterministic.

    >>> path, dist = main(cities=cities, ants_num=10, iterations_num=20,
    ...                   pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
    >>> path[0] == path[-1] == 0 and len(path) == len(cities) + 1
    True
    >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5,
    ...      pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
    ([0, 1, 0], 5.656854249492381)
    >>> main(cities={0: [0, 0], 1: [2, 2], 4: [4, 4]}, ants_num=5, iterations_num=5,
    ...      pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
    Traceback (most recent call last):
    ...
    IndexError: list index out of range
    >>> main(cities={}, ants_num=5, iterations_num=5,
    ...      pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
    Traceback (most recent call last):
    ...
    StopIteration
    >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=0, iterations_num=5,
    ...      pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
    ([], inf)
    >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=0,
    ...      pheromone_evaporation=0.7, alpha=1.0, beta=5.0, q=10)
    ([], inf)
    >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5,
    ...      pheromone_evaporation=1, alpha=1.0, beta=5.0, q=10)
    ([0, 1, 0], 5.656854249492381)
    >>> main(cities={0: [0, 0], 1: [2, 2]}, ants_num=5, iterations_num=5,
    ...      pheromone_evaporation=0, alpha=1.0, beta=5.0, q=10)
    ([0, 1, 0], 5.656854249492381)
    """
    # Initialize the pheromone matrix.
    # BUGFIX: the previous ``[[1.0] * n] * n`` aliased one row object n times,
    # so every write hit all rows and evaporation was applied n times per cell.
    cities_num = len(cities)
    pheromone = [[1.0] * cities_num for _ in range(cities_num)]

    best_path: list[int] = []
    best_distance = float("inf")
    for _ in range(iterations_num):
        ants_route = []
        for _ in range(ants_num):
            # Shallow copy is sufficient: coordinate lists are never mutated.
            unvisited_cities = dict(cities)
            current_city = {next(iter(cities.keys())): next(iter(cities.values()))}
            del unvisited_cities[next(iter(current_city.keys()))]
            ant_route = [next(iter(current_city.keys()))]
            while unvisited_cities:
                current_city, unvisited_cities = city_select(
                    pheromone, current_city, unvisited_cities, alpha, beta
                )
                ant_route.append(next(iter(current_city.keys())))
            ant_route.append(0)  # close the tour back at the origin city
            ants_route.append(ant_route)

        pheromone, best_path, best_distance = pheromone_update(
            pheromone,
            cities,
            pheromone_evaporation,
            ants_route,
            q,
            best_path,
            best_distance,
        )
    return best_path, best_distance


def distance(city1: list[int], city2: list[int]) -> float:
    """
    Calculate the Euclidean distance between two coordinate points.

    >>> distance([0, 0], [3, 4])
    5.0
    >>> distance([0, 0], [-3, 4])
    5.0
    >>> distance([0, 0], [-3, -4])
    5.0
    """
    return (((city1[0] - city2[0]) ** 2) + ((city1[1] - city2[1]) ** 2)) ** 0.5


def pheromone_update(
    pheromone: list[list[float]],
    cities: dict[int, list[int]],
    pheromone_evaporation: float,
    ants_route: list[list[int]],
    q: float,  # Pheromone system parameter Q, which is a constant
    best_path: list[int],
    best_distance: float,
) -> tuple[list[list[float]], list[int], float]:
    """
    Update pheromones on the routes and track the best route seen so far.

    >>> pheromone_update(pheromone=[[1.0, 1.0], [1.0, 1.0]],
    ...                  cities={0: [0,0], 1: [2,2]}, pheromone_evaporation=0.7,
    ...                  ants_route=[[0, 1, 0]], q=10, best_path=[],
    ...                  best_distance=float("inf"))
    ([[0.7, 4.235533905932737], [4.235533905932737, 0.7]], [0, 1, 0], 5.656854249492381)
    >>> pheromone_update(pheromone=[],
    ...                  cities={0: [0,0], 1: [2,2]}, pheromone_evaporation=0.7,
    ...                  ants_route=[[0, 1, 0]], q=10, best_path=[],
    ...                  best_distance=float("inf"))
    Traceback (most recent call last):
    ...
    IndexError: list index out of range
    >>> pheromone_update(pheromone=[[1.0, 1.0], [1.0, 1.0]],
    ...                  cities={}, pheromone_evaporation=0.7,
    ...                  ants_route=[[0, 1, 0]], q=10, best_path=[],
    ...                  best_distance=float("inf"))
    Traceback (most recent call last):
    ...
    KeyError: 0
    """
    for a in range(len(cities)):  # Evaporate pheromone on every edge
        for b in range(len(cities)):
            pheromone[a][b] *= pheromone_evaporation
    for ant_route in ants_route:
        total_distance = 0.0
        for i in range(len(ant_route) - 1):  # Calculate total distance
            total_distance += distance(cities[ant_route[i]], cities[ant_route[i + 1]])
        delta_pheromone = q / total_distance
        for i in range(len(ant_route) - 1):  # Deposit pheromone symmetrically
            pheromone[ant_route[i]][ant_route[i + 1]] += delta_pheromone
            pheromone[ant_route[i + 1]][ant_route[i]] = pheromone[ant_route[i]][
                ant_route[i + 1]
            ]

        if total_distance < best_distance:
            best_path = ant_route
            best_distance = total_distance

    return pheromone, best_path, best_distance


def city_select(
    pheromone: list[list[float]],
    current_city: dict[int, list[int]],
    unvisited_cities: dict[int, list[int]],
    alpha: float,
    beta: float,
) -> tuple[dict[int, list[int]], dict[int, list[int]]]:
    """
    Choose the next city for an ant, weighted by pheromone and inverse distance.

    >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={0: [0, 0]},
    ...             unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0)
    ({1: [2, 2]}, {})
    >>> city_select(pheromone=[], current_city={0: [0,0]},
    ...             unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0)
    Traceback (most recent call last):
    ...
    IndexError: list index out of range
    >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={},
    ...             unvisited_cities={1: [2, 2]}, alpha=1.0, beta=5.0)
    Traceback (most recent call last):
    ...
    StopIteration
    >>> city_select(pheromone=[[1.0, 1.0], [1.0, 1.0]], current_city={0: [0, 0]},
    ...             unvisited_cities={}, alpha=1.0, beta=5.0)
    Traceback (most recent call last):
    ...
    IndexError: list index out of range
    """
    probabilities = []
    for city in unvisited_cities:
        city_distance = distance(
            unvisited_cities[city], next(iter(current_city.values()))
        )
        probability = (pheromone[city][next(iter(current_city.keys()))] ** alpha) * (
            (1 / city_distance) ** beta
        )
        probabilities.append(probability)

    chosen_city_i = random.choices(
        list(unvisited_cities.keys()), weights=probabilities
    )[0]
    chosen_city = {chosen_city_i: unvisited_cities[chosen_city_i]}
    del unvisited_cities[next(iter(chosen_city.keys()))]
    return chosen_city, unvisited_cities


if __name__ == "__main__":
    best_path, best_distance = main(
        cities=cities,
        ants_num=10,
        iterations_num=20,
        pheromone_evaporation=0.7,
        alpha=1.0,
        beta=5.0,
        q=10,
    )

    print(f"{best_path = }")
    print(f"{best_distance = }")
In such scenarios, the function now returns an empty list, avoiding invalid recursive calls. * Update backtracking/all_combinations.py * Update all_combinations.py --------- Co-authored-by: Christian Clauss --- backtracking/all_combinations.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/backtracking/all_combinations.py b/backtracking/all_combinations.py index ecbcc5882..407304948 100644 --- a/backtracking/all_combinations.py +++ b/backtracking/all_combinations.py @@ -26,9 +26,11 @@ def generate_all_combinations(n: int, k: int) -> list[list[int]]: >>> generate_all_combinations(n=10, k=-1) Traceback (most recent call last): ... - RecursionError: maximum recursion depth exceeded + ValueError: k must not be negative >>> generate_all_combinations(n=-1, k=10) - [] + Traceback (most recent call last): + ... + ValueError: n must not be negative >>> generate_all_combinations(n=5, k=4) [[1, 2, 3, 4], [1, 2, 3, 5], [1, 2, 4, 5], [1, 3, 4, 5], [2, 3, 4, 5]] >>> from itertools import combinations @@ -36,6 +38,10 @@ def generate_all_combinations(n: int, k: int) -> list[list[int]]: ... 
for n in range(1, 6) for k in range(1, 6)) True """ + if k < 0: + raise ValueError("k must not be negative") + if n < 0: + raise ValueError("n must not be negative") result: list[list[int]] = [] create_all_state(1, n, k, [], result) From 8b39a0fb54d0f63489952606d2036d1a63f981e3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 25 Nov 2023 14:53:18 +0100 Subject: [PATCH 301/306] [pre-commit.ci] pre-commit autoupdate (#11154) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.1.4 → v0.1.6](https://github.com/astral-sh/ruff-pre-commit/compare/v0.1.4...v0.1.6) - [github.com/psf/black: 23.10.1 → 23.11.0](https://github.com/psf/black/compare/23.10.1...23.11.0) - [github.com/tox-dev/pyproject-fmt: 1.4.1 → 1.5.1](https://github.com/tox-dev/pyproject-fmt/compare/1.4.1...1.5.1) - [github.com/pre-commit/mirrors-mypy: v1.6.1 → v1.7.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.6.1...v1.7.0) * updating DIRECTORY.md * Update spiral_print.py * Update matrix/spiral_print.py * Update matrix/spiral_print.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 8 ++++---- DIRECTORY.md | 1 - matrix/spiral_print.py | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1bb3de782..9a0f78fdd 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.4 + rev: v0.1.6 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.10.1 + rev: 23.11.0 hooks: - id: black @@ -33,7 +33,7 @@ repos: - tomli - repo: 
https://github.com/tox-dev/pyproject-fmt - rev: "1.4.1" + rev: "1.5.1" hooks: - id: pyproject-fmt @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.6.1 + rev: v1.7.0 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index cb4b00b04..438950325 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1310,7 +1310,6 @@ * [Fetch Well Rx Price](web_programming/fetch_well_rx_price.py) * [Get Amazon Product Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) - * [Get Imdbtop](web_programming/get_imdbtop.py) * [Get Ip Geolocation](web_programming/get_ip_geolocation.py) * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) diff --git a/matrix/spiral_print.py b/matrix/spiral_print.py index 5eef263f7..7ba0a2751 100644 --- a/matrix/spiral_print.py +++ b/matrix/spiral_print.py @@ -116,7 +116,7 @@ def spiral_traversal(matrix: list[list]) -> list[int]: [1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7] + spiral_traversal([]) """ if matrix: - return list(matrix.pop(0)) + spiral_traversal(list(zip(*matrix))[::-1]) + return list(matrix.pop(0)) + spiral_traversal(list(zip(*matrix))[::-1]) # type: ignore else: return [] From 86ae30d29e4813c2ef071d7d27f1302b6be6cc0c Mon Sep 17 00:00:00 2001 From: Harsh Kumar <61012869+cyrixninja@users.noreply.github.com> Date: Sat, 25 Nov 2023 19:50:42 +0530 Subject: [PATCH 302/306] Create Spearman's rank correlation coefficient (#11155) * Create spearman_rank_correlation_coefficient.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed Issues * Added More Description * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed Issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Tried 
from collections.abc import Sequence


def assign_ranks(data: Sequence[float]) -> list[int]:
    """
    Assigns 1-based ranks to elements in the array (1 = smallest value).

    NOTE: ties are broken by original position, so equal values receive
    distinct consecutive ranks rather than averaged ranks.

    :param data: List of floats.
    :return: List of ints representing the ranks.

    Example:
    >>> assign_ranks([3.2, 1.5, 4.0, 2.7, 5.1])
    [3, 1, 4, 2, 5]

    >>> assign_ranks([10.5, 8.1, 12.4, 9.3, 11.0])
    [3, 1, 5, 2, 4]
    """
    ranked_data = sorted((value, index) for index, value in enumerate(data))
    ranks = [0] * len(data)

    # ranked_data is ordered by value, so an element's position in it is its rank.
    for position, (_, index) in enumerate(ranked_data):
        ranks[index] = position + 1

    return ranks


def calculate_spearman_rank_correlation(
    variable_1: Sequence[float], variable_2: Sequence[float]
) -> float:
    """
    Calculates Spearman's rank correlation coefficient.

    Uses the closed form rho = 1 - 6 * sum(d_i ** 2) / (n * (n ** 2 - 1)),
    which is exact only when neither variable contains tied values.

    :param variable_1: List of floats representing the first variable.
    :param variable_2: List of floats representing the second variable.
    :return: Spearman's rank correlation coefficient.
    :raises ValueError: If the inputs differ in length (``zip`` would
        otherwise silently truncate the longer one while ``n`` still counted
        it) or contain fewer than two observations (the denominator
        ``n * (n**2 - 1)`` would be zero).

    Example Usage:

    >>> x = [1, 2, 3, 4, 5]
    >>> y = [5, 4, 3, 2, 1]
    >>> calculate_spearman_rank_correlation(x, y)
    -1.0

    >>> x = [1, 2, 3, 4, 5]
    >>> y = [2, 4, 6, 8, 10]
    >>> calculate_spearman_rank_correlation(x, y)
    1.0

    >>> x = [1, 2, 3, 4, 5]
    >>> y = [5, 1, 2, 9, 5]
    >>> calculate_spearman_rank_correlation(x, y)
    0.6

    >>> calculate_spearman_rank_correlation([1, 2, 3], [1, 2])
    Traceback (most recent call last):
    ...
    ValueError: both variables must have the same number of observations

    >>> calculate_spearman_rank_correlation([1], [2])
    Traceback (most recent call last):
    ...
    ValueError: at least two observations are required
    """
    n = len(variable_1)
    if n != len(variable_2):
        raise ValueError("both variables must have the same number of observations")
    if n < 2:
        raise ValueError("at least two observations are required")

    rank_var1 = assign_ranks(variable_1)
    rank_var2 = assign_ranks(variable_2)

    # Calculate differences of ranks
    d = [rx - ry for rx, ry in zip(rank_var1, rank_var2)]

    # Calculate the sum of squared differences
    d_squared = sum(di**2 for di in d)

    # Calculate the Spearman's rank correlation coefficient
    rho = 1 - (6 * d_squared) / (n * (n**2 - 1))

    return rho


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example usage:
    print(
        f"{calculate_spearman_rank_correlation([1, 2, 3, 4, 5], [2, 4, 6, 8, 10]) = }"
    )

    print(f"{calculate_spearman_rank_correlation([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]) = }")

    print(f"{calculate_spearman_rank_correlation([1, 2, 3, 4, 5], [5, 1, 2, 9, 5]) = }")
BinarySearchTree: return self def search(self, value) -> Node | None: + """ + >>> tree = BinarySearchTree().insert(10, 20, 30, 40, 50) + >>> tree.search(10) + {'10': (None, {'20': (None, {'30': (None, {'40': (None, 50)})})})} + >>> tree.search(20) + {'20': (None, {'30': (None, {'40': (None, 50)})})} + >>> tree.search(30) + {'30': (None, {'40': (None, 50)})} + >>> tree.search(40) + {'40': (None, 50)} + >>> tree.search(50) + 50 + >>> tree.search(5) is None # element not present + True + >>> tree.search(0) is None # element not present + True + >>> tree.search(-5) is None # element not present + True + >>> BinarySearchTree().search(10) + Traceback (most recent call last): + ... + IndexError: Warning: Tree is empty! please use another. + """ + if self.empty(): raise IndexError("Warning: Tree is empty! please use another.") else: @@ -210,6 +234,15 @@ class BinarySearchTree: def get_max(self, node: Node | None = None) -> Node | None: """ We go deep on the right branch + + >>> BinarySearchTree().insert(10, 20, 30, 40, 50).get_max() + 50 + >>> BinarySearchTree().insert(-5, -1, 0.1, -0.3, -4.5).get_max() + {'0.1': (-0.3, None)} + >>> BinarySearchTree().insert(1, 78.3, 30, 74.0, 1).get_max() + {'78.3': ({'30': (1, 74.0)}, None)} + >>> BinarySearchTree().insert(1, 783, 30, 740, 1).get_max() + {'783': ({'30': (1, 740)}, None)} """ if node is None: if self.root is None: @@ -224,6 +257,15 @@ class BinarySearchTree: def get_min(self, node: Node | None = None) -> Node | None: """ We go deep on the left branch + + >>> BinarySearchTree().insert(10, 20, 30, 40, 50).get_min() + {'10': (None, {'20': (None, {'30': (None, {'40': (None, 50)})})})} + >>> BinarySearchTree().insert(-5, -1, 0, -0.3, -4.5).get_min() + {'-5': (None, {'-1': (-4.5, {'0': (-0.3, None)})})} + >>> BinarySearchTree().insert(1, 78.3, 30, 74.0, 1).get_min() + {'1': (None, {'78.3': ({'30': (1, 74.0)}, None)})} + >>> BinarySearchTree().insert(1, 783, 30, 740, 1).get_min() + {'1': (None, {'783': ({'30': (1, 740)}, 
None)})} """ if node is None: node = self.root From 154e5e8681b7ae9711fbef0b89f0ce365a8bf5bf Mon Sep 17 00:00:00 2001 From: Pedram_Mohajer <48964282+pedram-mohajer@users.noreply.github.com> Date: Sun, 26 Nov 2023 17:46:54 -0500 Subject: [PATCH 304/306] Update levenshtein_distance.py (#11171) * Update levenshtein_distance.py * Update levenshtein_distance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update levenshtein_distance.py * Update levenshtein_distance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update levenshtein_distance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update levenshtein_distance.py * Update levenshtein_distance.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- strings/levenshtein_distance.py | 95 ++++++++++++++++++++++++++------- 1 file changed, 75 insertions(+), 20 deletions(-) diff --git a/strings/levenshtein_distance.py b/strings/levenshtein_distance.py index 7be4074dc..3af660872 100644 --- a/strings/levenshtein_distance.py +++ b/strings/levenshtein_distance.py @@ -1,20 +1,9 @@ -""" -This is a Python implementation of the levenshtein distance. -Levenshtein distance is a string metric for measuring the -difference between two sequences. - -For doctests run following command: -python -m doctest -v levenshtein-distance.py -or -python3 -m doctest -v levenshtein-distance.py - -For manual testing run: -python levenshtein-distance.py -""" +from collections.abc import Callable def levenshtein_distance(first_word: str, second_word: str) -> int: - """Implementation of the levenshtein distance in Python. + """ + Implementation of the Levenshtein distance in Python. 
from collections.abc import Callable


def levenshtein_distance_optimized(first_word: str, second_word: str) -> int:
    """
    Compute the Levenshtein distance between two words (strings).
    The function is optimized for efficiency by keeping only one previous
    row of the dynamic-programming table in memory and modifying it in place.

    :param first_word: the first word to measure the difference.
    :param second_word: the second word to measure the difference.
    :return: the Levenshtein distance between the two words.

    Examples:
    >>> levenshtein_distance_optimized("planet", "planetary")
    3
    >>> levenshtein_distance_optimized("", "test")
    4
    >>> levenshtein_distance_optimized("book", "back")
    2
    >>> levenshtein_distance_optimized("book", "book")
    0
    >>> levenshtein_distance_optimized("test", "")
    4
    >>> levenshtein_distance_optimized("", "")
    0
    >>> levenshtein_distance_optimized("orchestration", "container")
    10
    """
    # Make the shorter word the row dimension so the row buffer is minimal.
    if len(first_word) < len(second_word):
        return levenshtein_distance_optimized(second_word, first_word)

    if len(second_word) == 0:
        return len(first_word)

    previous_row = list(range(len(second_word) + 1))

    for i, c1 in enumerate(first_word):
        current_row = [i + 1] + [0] * len(second_word)

        for j, c2 in enumerate(second_word):
            # Classic edit-distance recurrence: insert, delete, or substitute.
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row[j + 1] = min(insertions, deletions, substitutions)

        previous_row = current_row

    return previous_row[-1]


def benchmark_levenshtein_distance(func: Callable) -> None:
    """
    Benchmark a Levenshtein distance function and print the timing result.

    :param func: The function to be benchmarked. It is re-imported by name
        from ``__main__`` inside the timeit setup, so this helper must be
        invoked from the script context (as done in the guard below).
    """
    from timeit import timeit

    stmt = f"{func.__name__}('sitting', 'kitten')"
    setup = f"from __main__ import {func.__name__}"
    number = 25_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{func.__name__:<30} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    # Get user input for words
    first_word = input("Enter the first word for Levenshtein distance:\n").strip()
    second_word = input("Enter the second word for Levenshtein distance:\n").strip()

    # Calculate and print Levenshtein distances
    print(f"{levenshtein_distance(first_word, second_word) = }")
    print(f"{levenshtein_distance_optimized(first_word, second_word) = }")

    # Benchmark the Levenshtein distance functions
    benchmark_levenshtein_distance(levenshtein_distance)
    benchmark_levenshtein_distance(levenshtein_distance_optimized)
pre-commit autoupdate updates: - [github.com/pre-commit/mirrors-mypy: v1.7.0 → v1.7.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.7.0...v1.7.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- DIRECTORY.md | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9a0f78fdd..28f83a638 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.7.0 + rev: v1.7.1 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index 438950325..ea0ba22bc 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -428,12 +428,16 @@ * [Haversine Distance](geodesy/haversine_distance.py) * [Lamberts Ellipsoidal Distance](geodesy/lamberts_ellipsoidal_distance.py) +## Geometry + * [Geometry](geometry/geometry.py) + ## Graphics * [Bezier Curve](graphics/bezier_curve.py) * [Vector3 For 2D Rendering](graphics/vector3_for_2d_rendering.py) ## Graphs * [A Star](graphs/a_star.py) + * [Ant Colony Optimization Algorithms](graphs/ant_colony_optimization_algorithms.py) * [Articulation Points](graphs/articulation_points.py) * [Basic Graphs](graphs/basic_graphs.py) * [Bellman Ford](graphs/bellman_ford.py) @@ -718,6 +722,7 @@ * [Sock Merchant](maths/sock_merchant.py) * [Softmax](maths/softmax.py) * [Solovay Strassen Primality Test](maths/solovay_strassen_primality_test.py) + * [Spearman Rank Correlation Coefficient](maths/spearman_rank_correlation_coefficient.py) * Special Numbers * [Armstrong Numbers](maths/special_numbers/armstrong_numbers.py) * [Automorphic Number](maths/special_numbers/automorphic_number.py)