Compare commits

...

7 Commits

Author | SHA1 | Message | Date
Rohan Anand | 27b1bbacb6 | Update newton_raphson.py | 2023-07-16 21:07:51 +05:30
pre-commit-ci[bot] | 064bc8cb88 | [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci) | 2023-07-16 15:27:43 +00:00
Rohan Anand | aeb42b1e12 | Update newton_raphson.py | 2023-07-16 20:57:10 +05:30
Rohan Anand | cc3d6a62fc | Update arithmetic_analysis/newton_raphson.py (Co-authored-by: Christian Clauss <cclauss@me.com>) | 2023-07-16 20:50:03 +05:30
Christian Clauss | cbaeaa035c | Update newton_raphson.py | 2023-07-16 17:16:21 +02:00
Rohan Anand | 8d6f837303 | Update arithmetic_analysis/newton_raphson.py (Co-authored-by: Christian Clauss <cclauss@me.com>) | 2023-07-16 20:42:35 +05:30
Rohan Anand | daf594dd50 | Update arithmetic_analysis/newton_raphson.py (Co-authored-by: Christian Clauss <cclauss@me.com>) | 2023-07-16 20:42:27 +05:30

arithmetic_analysis/newton_raphson.py

@@ -1,7 +1,8 @@
 # Implementing Newton Raphson method in Python
 # Author: Syed Haseeb Shah (github.com/QuantumNovice)
 # The Newton-Raphson method (also known as Newton's method) is a way to
-# quickly find a good approximation for the root of a real-valued function
+# quickly find a good approximation for the root of a real-valued function.
 # https://en.wikipedia.org/wiki/Newton%27s_method
 from __future__ import annotations
 from sympy import diff, symbols, sympify
@@ -19,7 +20,22 @@ def newton_raphson(
 2.23606797749979
 >>> newton_raphson("log(x)- 1", 2)
 2.718281828458938
+>>> from scipy.optimize import newton
+>>> all(newton_raphson("log(x)- 1", 2) == newton("log(x)- 1", 2)
+... for precision in (10, 100, 1000, 10000))
+True
+>>> newton_raphson("log(x)- 1", 2, 0)
+Traceback (most recent call last):
+...
+ValueError: precision must be greater than zero
+>>> newton_raphson("log(x)- 1", 2, -1)
+Traceback (most recent call last):
+...
+ValueError: precision must be greater than zero
 """
+if precision <= 0:
+    raise ValueError("precision must be greater than zero")
 x = start_point
 symbol = symbols("x")
@@ -38,7 +54,7 @@ def newton_raphson(
 if abs(function_value) < precision:
     return float(x)
-x = x - (function_value / derivative_value)
+x -= function_value / derivative_value
 return float(x)
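
For reference, here is a minimal standalone sketch of the routine these hunks are editing, pieced together only from the lines visible above: the precision check, the sympy-based derivative, the stopping test on |f(x)| < precision, and the Newton step x -= f(x)/f'(x). The parameter list, loop structure, and the float() evaluation of intermediate values are assumptions, not the repository's exact final code, and the cross-check against scipy.optimize.newton passes a callable and a starting point, which is the form SciPy's solver accepts.

from __future__ import annotations

from math import log

from scipy.optimize import newton  # used only for the cross-check below
from sympy import diff, symbols, sympify


def newton_raphson(function: str, start_point: float, precision: float = 1e-10) -> float:
    """Approximate a root of `function` (given as a string) starting from `start_point`."""
    if precision <= 0:
        raise ValueError("precision must be greater than zero")

    x = start_point
    symbol = symbols("x")
    expression = sympify(function)         # parse the string into a sympy expression
    derivative = diff(expression, symbol)  # symbolic derivative f'(x)

    while True:
        function_value = float(expression.subs(symbol, x))    # f(x), evaluated numerically
        derivative_value = float(derivative.subs(symbol, x))  # f'(x), evaluated numerically
        if abs(function_value) < precision:     # stop once |f(x)| is below the threshold
            return float(x)
        x -= function_value / derivative_value  # Newton step: x_{n+1} = x_n - f(x_n)/f'(x_n)


if __name__ == "__main__":
    print(newton_raphson("log(x) - 1", 2))     # converges to e ~ 2.718281828...
    print(newton(lambda x: log(x) - 1, x0=2))  # SciPy's solver takes a callable, not a string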