Commit

Merge pull request #143 from ashao/hw8_branch
ashao: HW8 submission with debug log
jakevdp committed Dec 2, 2014
2 parents c45a9c2 + 700a354 commit fc9216a
Showing 2 changed files with 183 additions and 0 deletions.
180 changes: 180 additions & 0 deletions ashao/HW8.ipynb
@@ -0,0 +1,180 @@
{
"metadata": {
"name": "",
"signature": "sha256:c0e135bbdf1a58ca19a647337722cb9c0a1e8b56a9b22814dcc95d0745692985"
},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "code",
"collapsed": false,
"input": [
"\"\"\"\n",
"A script to compare different root-finding algorithms.\n",
"\n",
"This version of the script is buggy and does not execute. It is your task\n",
"to find an fix these bugs.\n",
"\n",
"The output of the script sould look like:\n",
"\n",
" Benching 1D root-finder optimizers from scipy.optimize:\n",
" brenth: 604678 total function calls\n",
" brentq: 594454 total function calls\n",
" ridder: 778394 total function calls\n",
" bisect: 2148380 total function calls\n",
"\"\"\"\n",
"from itertools import product\n",
"\n",
"import numpy as np\n",
"from scipy import optimize\n",
"\n",
"FUNCTIONS = (np.tan, # Dilating map\n",
" np.tanh, # Contracting map\n",
" lambda x: x**3 + 1e-4*x, # Almost null gradient at the root\n",
" lambda x: x+np.sin(2*x), # Non monotonous function\n",
" lambda x: 1.1*x+np.sin(4*x), # Fonction with several local maxima\n",
" )\n",
"\n",
"OPTIMIZERS = (optimize.brenth, optimize.brentq,\n",
" optimize.ridder, optimize.bisect)\n",
"\n",
"\n",
"def apply_optimizer(optimizer, func, a, b):\n",
" \"\"\" Return the number of function calls given an root-finding optimizer, \n",
" a function and upper and lower bounds.\n",
" \"\"\"\n",
" return optimizer(func, a, b, full_output=True)[1].function_calls,\n",
"\n",
"\n",
"def bench_optimizer(optimizer, param_grid):\n",
" \"\"\" Find roots for all the functions, and upper and lower bounds\n",
" given and return the total number of function calls.\n",
" \"\"\" \n",
" ncalls = 0\n",
" for func, a, b in param_grid:\n",
" ncalls += sum(apply_optimizer(optimizer, func, a, b))\n",
" \n",
" return ncalls\n",
" \n",
"def compare_optimizers(optimizers):\n",
" \"\"\" Compare all the optimizers given on a grid of a few different\n",
" functions all admitting a signle root in zero and a upper and\n",
" lower bounds.\n",
" \"\"\"\n",
" random_a = -1.3 + np.random.random(size=100)\n",
" random_b = .3 + np.random.random(size=100) \n",
" \n",
" print(\"Benching 1D root-finder optimizers from scipy.optimize:\")\n",
" for optimizer in optimizers:\n",
" param_grid = product(FUNCTIONS, random_a, random_b)\n",
" ncalls = bench_optimizer(optimizer, param_grid)\n",
" print('{name}: {ncalls} total function calls'.format(\n",
" name=optimizer.__name__, ncalls=ncalls)) \n",
" \n",
"\n",
"\n",
"#if __name__ == '__main__':\n",
" #compare_optimizers(OPTIMIZERS)\n"
],
"language": "python",
"metadata": {},
"outputs": [],
"prompt_number": 87
},
{
"cell_type": "code",
"collapsed": false,
"input": [
"compare_optimizers(OPTIMIZERS)"
],
"language": "python",
"metadata": {},
"outputs": [
{
"output_type": "stream",
"stream": "stdout",
"text": [
"Benching 1D root-finder optimizers from scipy.optimize:\n",
"brenth: 603110 total function calls"
]
},
{
"output_type": "stream",
"stream": "stdout",
"text": [
"\n",
"brentq: 592013 total function calls"
]
},
{
"output_type": "stream",
"stream": "stdout",
"text": [
"\n",
"ridder: 772860 total function calls"
]
},
{
"output_type": "stream",
"stream": "stdout",
"text": [
"\n",
"bisect: 2147165 total function calls"
]
},
{
"output_type": "stream",
"stream": "stdout",
"text": [
"\n"
]
}
],
"prompt_number": 88
},
{
"cell_type": "markdown",
"metadata": {},
"source": []
},
{
"cell_type": "code",
"collapsed": false,
"input": [
"random_a = -1.3 + np.random.random(size=100)\n",
"random_b = .3 + np.random.random(size=100)\n",
"param_grid = product(FUNCTIONS, random_a, random_b)\n",
"bench_optimizer(OPTIMIZERS[4],param_grid)"
],
"language": "python",
"metadata": {},
"outputs": [
{
"ename": "IndexError",
"evalue": "tuple index out of range",
"output_type": "pyerr",
"traceback": [
"\u001b[1;31m---------------------------------------------------------------------------\u001b[0m\n\u001b[1;31mIndexError\u001b[0m Traceback (most recent call last)",
"\u001b[1;32m<ipython-input-85-1aab16027949>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[0mrandom_b\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m.3\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrandom\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m100\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[0mparam_grid\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mproduct\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mFUNCTIONS\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mrandom_a\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mrandom_b\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 4\u001b[1;33m \u001b[0mbench_optimizer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mOPTIMIZERS\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m4\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mparam_grid\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[1;31mIndexError\u001b[0m: tuple index out of range"
]
}
],
"prompt_number": 85
},
{
"cell_type": "code",
"collapsed": false,
"input": [],
"language": "python",
"metadata": {},
"outputs": []
}
],
"metadata": {}
}
]
}
3 changes: 3 additions & 0 deletions ashao/HW8_debug.log
@@ -0,0 +1,3 @@
Homework 8 Debugging Log:

An initial inspection of the code revealed a potential bug: compare_optimizers was using the global variable OPTIMIZERS instead of its function argument. The first error thrown (an invalid operator) came from 'bench_optimizer', where the nested loop inside the sum() call was not working as intended; the loop was broken out into an explicit for loop that accumulates a variable called ncalls. After these fixes compare_optimizers executed, but every optimizer other than the first reported 0 function calls. Testing each algorithm individually with a separate driver routine gave the expected behavior, which pointed to something wrong with the iterators. The 'product' call was the likely culprit, since the printed optimizer names showed that the for loop in 'compare_optimizers' itself was behaving as expected. Moving the 'product' line inside the for loop, so that a fresh parameter grid is built for each optimizer, corrects the problem and the script yields the expected output.
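
The underlying cause is that itertools.product returns a single-pass iterator rather than a reusable sequence: the first optimizer's loop consumes it, so every later optimizer sees an empty param_grid and accumulates 0 function calls. A minimal standalone sketch of that behavior (illustrative only, not part of the submitted notebook):

    from itertools import product

    grid = product(('f', 'g'), (-1.0, -0.5), (0.5, 1.0))  # a one-shot iterator, not a list
    print(sum(1 for _ in grid))  # 8 -- the first traversal consumes the iterator
    print(sum(1 for _ in grid))  # 0 -- a second traversal finds it already exhausted

Rebuilding the grid inside the for loop, or materializing it once with list(product(...)), gives each optimizer a fresh iterable to traverse.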
