Commit
Merge pull request esa#118 from esa/Release
release -> main for Release 0.2.3
htoftevaag authored Aug 20, 2021
2 parents 7641d96 + 6be0131 commit e5c4992
Showing 7 changed files with 33 additions and 24 deletions.
2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -21,7 +21,7 @@
author = "ESA Advanced Concepts Team"

# The full version, including alpha/beta/rc tags
release = "0.2.1"
release = "0.2.3"


# -- General configuration ---------------------------------------------------
4 changes: 2 additions & 2 deletions environment.yml
@@ -3,7 +3,7 @@ channels:
- conda-forge
- pytorch
dependencies:
#- cudatoolkit>=11.1
- cudatoolkit>=11.1
- loguru>=0.5.3
- matplotlib>=3.3.3
- pytest>=6.2.1
@@ -12,4 +12,4 @@ dependencies:
- scipy>=1.6.0
- sphinx>=3.4.3
- sphinx_rtd_theme>=0.5.1
- tqdm>=4.56.0
- tqdm>=4.56.0
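The environment.yml change appears to re-enable the previously commented-out cudatoolkit dependency, so a conda environment built from this file ships with CUDA support for PyTorch. A minimal sketch (not part of this commit) for checking that a GPU is actually visible before enabling it in torchquad:

# Sketch (not part of this commit): quick check that the cudatoolkit from the
# conda environment is visible to PyTorch before enabling GPU integration.
import torch

if torch.cuda.is_available():
    print("CUDA device:", torch.cuda.get_device_name(0))
else:
    print("No CUDA device visible; integrations will run on the CPU.")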
3 changes: 2 additions & 1 deletion setup.py
@@ -9,14 +9,15 @@

setup(
name="torchquad",
version="0.2.1",
version="0.2.3",
description="Package providing torch-based numerical integration methods.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/esa/torchquad",
author="ESA Advanced Concepts Team",
author_email="[email protected]",
install_requires=[
"loguru>=0.5.3",
"matplotlib>=3.3.3",
"scipy>=1.6.0",
"tqdm>=4.56.1",
1 change: 0 additions & 1 deletion torchquad/__init__.py
@@ -39,4 +39,3 @@

set_log_level("INFO")
logger.info("Initializing torchquad.")
enable_cuda()
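With the enable_cuda() call removed from torchquad/__init__.py, importing the package no longer touches the GPU; users opt in explicitly. A minimal sketch (not part of this commit) of the explicit setup, reusing the same calls that appear in the deployment test further down:

# Sketch (not part of this commit): GPU support is now enabled explicitly
# after import instead of automatically inside __init__.py.
from torchquad import enable_cuda, set_precision

enable_cuda()            # use the GPU as default device if CUDA is available
set_precision("double")  # optional: switch to float64, as the deployment test does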
3 changes: 2 additions & 1 deletion torchquad/integration/vegas.py
@@ -179,7 +179,8 @@ def _warmup_grid(self, warmup_N_it=5, N_samples=1000):
jf = 0 # jacobians * function
jf2 = 0

yrnd = torch.rand(size=[N_samples, self._dim])
# Multiplying by 0.99999999 as the edge case of y=1 leads to an error
yrnd = torch.rand(size=[N_samples, self._dim]) * 0.999999
x = self.map.get_X(yrnd)
f_eval = self._eval(x).squeeze()
jac = self.map.get_Jac(yrnd)
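The warm-up sampling in vegas.py now scales the uniform draws by 0.999999 so that no sample reaches y = 1, which the added comment identifies as an error case; the same guard is applied in vegas_stratification.py below. A minimal sketch (not part of this commit) of the scaling:

# Sketch (not part of this commit): shrink uniform samples so every value
# stays strictly below 1 before passing them to the VEGAS map.
import torch

N_samples, dim = 1000, 2   # hypothetical sizes; the real code uses self._dim
yrnd = torch.rand(size=[N_samples, dim]) * 0.999999
assert float(yrnd.max()) < 1.0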
6 changes: 3 additions & 3 deletions torchquad/integration/vegas_stratification.py
@@ -133,9 +133,9 @@ def get_Y(self, nevals):
indices = self._get_indices(indices)

# Get random numbers (we get a few more just to vectorize properly)
# This might increase the memorz requirements slightly but is probably
# worth it
random_uni = torch.rand(size=[len(nevals), nevals.max(), self.dim])
# This might increase the memory requirements slightly but is probably
# worth it.
random_uni = torch.rand(size=[len(nevals), nevals.max(), self.dim]) * 0.999999

# Sum the random numbers onto the index locations and scale with dy
# Note that the resulting tensor is still slightly too large
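Besides the same *0.999999 guard, the surrounding comments explain the vectorization trade-off: one padded block of uniforms of shape [len(nevals), nevals.max(), dim] is drawn instead of a ragged per-cube loop, at the cost of slightly more memory. A minimal sketch (not part of this commit) of that padded draw, with hypothetical per-cube sample counts:

# Sketch (not part of this commit): draw one padded block of uniforms for all
# hypercubes at once; padding to max(nevals) wastes a little memory but keeps
# the sampling fully vectorized.
import torch

nevals = torch.tensor([3, 5, 2])  # hypothetical samples per hypercube
dim = 2
random_uni = torch.rand(size=[len(nevals), int(nevals.max()), dim]) * 0.999999
print(random_uni.shape)  # torch.Size([3, 5, 2]), padded to the largest count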
38 changes: 23 additions & 15 deletions torchquad/utils/deployment_test.py
@@ -8,6 +8,8 @@

from torchquad import enable_cuda
from torchquad import set_precision
from torchquad import set_log_level
from loguru import logger


def _deployment_test():
@@ -17,38 +17,44 @@ def _deployment_test():
"""
"""[summary]
"""
print()
print()
print()
print("######## TESTING DEPLOYMENT ########")
print()
set_log_level("INFO")
logger.info("####################################")
logger.info("######## TESTING DEPLOYMENT ########")
logger.info("####################################")
logger.info("")

print("Testing CUDA init... ", end="")
logger.info("Testing CUDA init... ")
# Test inititialization on GPUs if available
enable_cuda()
set_precision("double")
print("Done.")
logger.info("Done.")

print("Initializing integrators... ", end="")
logger.info("")
logger.info("####################################")

logger.info("Initializing integrators... ")
tp = Trapezoid()
sp = Simpson()
boole = Boole()
mc = MonteCarlo()
vegas = VEGAS()
print("Done.")
logger.info("Done.")

def some_test_function(x):
return torch.exp(x) * torch.pow(x, 2)

print("Testing integrate functions... ", end="")
logger.info("")
logger.info("####################################")

logger.info("Testing integrate functions... ")
tp.integrate(some_test_function, dim=1, N=101)
sp.integrate(some_test_function, dim=1, N=101)
boole.integrate(some_test_function, dim=1, N=101)
mc.integrate(some_test_function, dim=1, N=101)
vegas.integrate(some_test_function, dim=1, N=101)
print("Done.")
logger.info("Done.")
logger.info("")

print()
print()
print()
print("######## ALL DONE. ########")
logger.info("####################################")
logger.info("############ ALL DONE. #############")
logger.info("####################################")
