Dynamic programming (DP) is a technique for solving problems that can be broken down into overlapping subproblems. Instead of solving the same subproblem repeatedly, you solve it once, store the result, and look it up whenever it is needed again.
Two conditions signal that DP applies: the problem has overlapping subproblems (the same smaller problem appears many times) and optimal substructure (an optimal solution to the whole problem can be built from optimal solutions to its parts).
Related topic: Recursion - most top-down DP solutions begin life as a plain recursive function.
Divide and conquer breaks a problem into independent subproblems, solves each one, then combines the results. Because the subproblems do not overlap, there is nothing to memoise - plain recursion is sufficient. Merge sort is the classic example: split the array in half, sort each half, then merge the two sorted halves.
The difference from DP is that DP subproblems do overlap. If you find yourself solving the same subproblem more than once, you have moved from divide-and-conquer territory into DP territory, and memoisation becomes valuable.
# Divide and conquer: merge sort
def merge_sort(arr):
    """Return a sorted copy of arr via recursive halving plus merging.

    Split the list in half, sort each half recursively, then combine
    the two sorted halves with merge(). Lists of length 0 or 1 are
    returned as-is (already sorted).
    """
    if len(arr) <= 1:
        return arr
    half = len(arr) // 2
    sorted_left = merge_sort(arr[:half])
    sorted_right = merge_sort(arr[half:])
    return merge(sorted_left, sorted_right)
def merge(a, b):
    """Merge two sorted lists into one sorted list.

    Stable: when elements compare equal, the element from `a` is
    emitted first. Runs in O(len(a) + len(b)).
    """
    merged = []
    ia = ib = 0
    # Take the smaller head element until one list is exhausted.
    while ia < len(a) and ib < len(b):
        if a[ia] <= b[ib]:
            merged.append(a[ia])
            ia += 1
        else:
            merged.append(b[ib])
            ib += 1
    # At most one of these is non-empty; both are already sorted.
    merged += a[ia:]
    merged += b[ib:]
    return merged
// Divide and conquer: merge sort
//
// Merges the two adjacent sorted runs arr[l..m] and arr[m+1..r]
// (inclusive bounds) back into arr in sorted order. Stable: on ties
// the element from the left run is taken first.
void merge(vector<int>& arr, int l, int m, int r) {
    vector<int> merged;
    merged.reserve(r - l + 1);  // one allocation for the whole run
    int left = l;               // cursor into arr[l..m]
    int right = m + 1;          // cursor into arr[m+1..r]
    while (left <= m && right <= r) {
        if (arr[left] <= arr[right])
            merged.push_back(arr[left++]);
        else
            merged.push_back(arr[right++]);
    }
    // Drain whichever run still has elements (at most one does).
    while (left <= m) merged.push_back(arr[left++]);
    while (right <= r) merged.push_back(arr[right++]);
    // Copy the merged run back into place.
    for (int k = 0; k < static_cast<int>(merged.size()); ++k)
        arr[l + k] = merged[k];
}
// Sorts arr[l..r] (inclusive bounds) in place: recursively sort each
// half, then merge the two sorted halves.
void mergeSort(vector<int>& arr, int l, int r) {
    if (l >= r) return;               // 0 or 1 elements: already sorted
    const int mid = l + (r - l) / 2;  // overflow-safe midpoint
    mergeSort(arr, l, mid);           // sort left half
    mergeSort(arr, mid + 1, r);       // sort right half
    merge(arr, l, mid, r);            // combine the sorted halves
}
// Divide and conquer: merge sort
//
// Merges the two adjacent sorted runs arr[l..m] and arr[m+1..r]
// (inclusive bounds) back into arr in sorted order. Stable: on ties
// the element from the left run is taken first.
void Merge(int[] arr, int l, int m, int r) {
    var merged = new List<int>(r - l + 1);  // pre-size: one allocation
    int left = l, right = m + 1;
    while (left <= m && right <= r) {
        if (arr[left] <= arr[right])
            merged.Add(arr[left++]);
        else
            merged.Add(arr[right++]);
    }
    // Drain whichever run still has elements (at most one does).
    while (left <= m) merged.Add(arr[left++]);
    while (right <= r) merged.Add(arr[right++]);
    // Copy the merged run back into place.
    for (int k = 0; k < merged.Count; k++) arr[l + k] = merged[k];
}
// Sorts arr[l..r] (inclusive bounds) in place: recursively sort each
// half, then merge the two sorted halves.
void MergeSort(int[] arr, int l, int r) {
    if (l >= r) return;          // 0 or 1 elements: already sorted
    int mid = l + (r - l) / 2;   // overflow-safe midpoint
    MergeSort(arr, l, mid);      // sort left half
    MergeSort(arr, mid + 1, r);  // sort right half
    Merge(arr, l, mid, r);       // combine the sorted halves
}
// Divide and conquer: merge sort
//
// Merges the two adjacent sorted runs arr[l..m] and arr[m+1..r]
// (inclusive bounds) back into arr in sorted order. Stable: on ties
// the element from the left run is taken first.
void merge(int[] arr, int l, int m, int r) {
    List<Integer> merged = new ArrayList<>(r - l + 1);  // pre-sized
    int left = l, right = m + 1;
    while (left <= m && right <= r) {
        if (arr[left] <= arr[right]) {
            merged.add(arr[left++]);
        } else {
            merged.add(arr[right++]);
        }
    }
    // Drain whichever run still has elements (at most one does).
    while (left <= m) merged.add(arr[left++]);
    while (right <= r) merged.add(arr[right++]);
    // Copy the merged run back into place.
    for (int k = 0; k < merged.size(); k++) arr[l + k] = merged.get(k);
}
// Sorts arr[l..r] (inclusive bounds) in place: recursively sort each
// half, then merge the two sorted halves.
void mergeSort(int[] arr, int l, int r) {
    if (l >= r) return;          // 0 or 1 elements: already sorted
    int mid = l + (r - l) / 2;   // overflow-safe midpoint
    mergeSort(arr, l, mid);      // sort left half
    mergeSort(arr, mid + 1, r);  // sort right half
    merge(arr, l, mid, r);       // combine the sorted halves
}
The top-down approach starts with the original problem and recurses into subproblems. Before computing a subproblem, check whether the answer is already in a cache (dictionary / hash map). If it is, return the stored value immediately. If not, compute it, store it, then return it. This is called memoisation.
The Fibonacci sequence is the textbook example. A naive recursive implementation makes an exponential number of calls because fib(n-1) and fib(n-2) share many subproblems. Adding a memo reduces this to O(n).
# Top-down DP: Fibonacci with memoisation
def fib(n, memo=None):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Top-down memoisation: each subproblem's result is cached in `memo`,
    collapsing the naive exponential recursion to O(n).

    Args:
        n: non-negative index into the Fibonacci sequence.
        memo: optional dict cache mapping i -> fib(i), threaded through
            the recursive calls. Defaults to a fresh dict per top-level
            call. (The previous version used a mutable default argument,
            `memo={}`, which silently shares one dict across *every*
            call to the function - a classic Python pitfall. Results
            are identical either way; the cache is simply scoped to one
            call tree now.)

    Returns:
        The n-th Fibonacci number as an int.
    """
    if memo is None:
        memo = {}
    if n in memo:
        return memo[n]
    if n <= 1:
        return n  # base cases: fib(0) = 0, fib(1) = 1
    memo[n] = fib(n - 1, memo) + fib(n - 2, memo)
    return memo[n]
// Top-down DP: Fibonacci with memoisation
//
// memo caches n -> fib(n) across calls, so each subproblem is computed
// once and the naive exponential recursion collapses to O(n).
unordered_map<int, long long> memo;

long long fib(int n) {
    auto hit = memo.find(n);
    if (hit != memo.end()) return hit->second;  // cached result
    if (n <= 1) return n;                       // fib(0) = 0, fib(1) = 1
    long long val = fib(n - 1) + fib(n - 2);
    memo[n] = val;
    return val;
}
// Top-down DP: Fibonacci with memoisation
//
// memo caches n -> Fib(n) across calls, so each subproblem is computed
// once and the naive exponential recursion collapses to O(n).
Dictionary<int, long> memo = new Dictionary<int, long>();

long Fib(int n) {
    if (n <= 1) return n;  // Fib(0) = 0, Fib(1) = 1 (never cached)
    if (memo.TryGetValue(n, out long cached)) return cached;
    long val = Fib(n - 1) + Fib(n - 2);
    memo[n] = val;
    return val;
}
// Top-down DP: Fibonacci with memoisation
//
// memo caches n -> fib(n) across calls, so each subproblem is computed
// once and the naive exponential recursion collapses to O(n).
Map<Integer, Long> memo = new HashMap<>();

long fib(int n) {
    if (n <= 1) return n;  // fib(0) = 0, fib(1) = 1 (never cached)
    Long cached = memo.get(n);
    if (cached != null) return cached;
    long result = fib(n - 1) + fib(n - 2);
    memo.put(n, result);
    return result;
}
The bottom-up approach flips the direction. Instead of starting at the top and recursing down, you start from the smallest subproblems and build upward, filling in a table (usually an array) iteratively. By the time you reach the entry you need, every dependency has already been computed.
Bottom-up avoids recursion entirely, so there is no risk of stack overflow on deep inputs. It also tends to have lower constant-factor overhead because there are no function-call costs. However, top-down can be easier to write because it mirrors the recursive thinking directly.
# Bottom-up DP: Fibonacci with tabulation
def fib(n):
    """Return the n-th Fibonacci number by filling a table from 0 up.

    table[i] holds fib(i); each entry depends only on the two entries
    below it, so a single left-to-right pass computes fib(n) in O(n)
    time and O(n) space with no recursion.
    """
    if n <= 1:
        return n  # fib(0) = 0, fib(1) = 1
    table = [0, 1] + [0] * (n - 1)
    for i in range(2, n + 1):
        table[i] = table[i - 2] + table[i - 1]
    return table[n]
// Bottom-up DP: Fibonacci with tabulation
//
// dp[i] holds fib(i); each entry depends only on the two entries below
// it, so one left-to-right pass computes fib(n) in O(n) time and O(n)
// space with no recursion.
long long fib(int n) {
    if (n <= 1) return n;  // fib(0) = 0, fib(1) = 1
    vector<long long> dp(n + 1);
    dp[0] = 0;
    dp[1] = 1;
    for (int i = 2; i <= n; ++i) {
        dp[i] = dp[i - 2] + dp[i - 1];
    }
    return dp[n];
}
// Bottom-up DP: Fibonacci with tabulation
//
// dp[i] holds Fib(i); each entry depends only on the two entries below
// it, so one left-to-right pass computes Fib(n) in O(n) time with no
// recursion.
long Fib(int n) {
    if (n <= 1) return n;  // Fib(0) = 0, Fib(1) = 1
    var dp = new long[n + 1];
    dp[1] = 1;  // dp[0] is already 0
    for (int i = 2; i <= n; i++) {
        dp[i] = dp[i - 2] + dp[i - 1];
    }
    return dp[n];
}
// Bottom-up DP: Fibonacci with tabulation
//
// dp[i] holds fib(i); each entry depends only on the two entries below
// it, so one left-to-right pass computes fib(n) in O(n) time with no
// recursion.
long fib(int n) {
    if (n <= 1) return n;  // fib(0) = 0, fib(1) = 1
    long[] dp = new long[n + 1];
    dp[1] = 1;  // dp[0] is already 0
    for (int i = 2; i <= n; i++) {
        dp[i] = dp[i - 2] + dp[i - 1];
    }
    return dp[n];
}
You are climbing a staircase with n steps. Each time you can climb 1 or 2 steps. How many distinct ways can you reach the top? This is structurally identical to Fibonacci: the number of ways to reach step i is the sum of the ways to reach step i - 1 (then take 1 step) and step i - 2 (then take 2 steps).
Below is the bottom-up solution. Notice you only ever need the previous two values, so you can optimise space from O(n) to O(1) by keeping just two variables instead of an entire array.
# Climbing stairs - O(1) space bottom-up
def climbStairs(n):
    """Count distinct ways to climb n steps taking 1 or 2 at a time.

    Fibonacci-style recurrence: ways(i) = ways(i-1) + ways(i-2). Only
    the previous two values are ever needed, so two rolling variables
    replace the whole table and space drops from O(n) to O(1).
    """
    if n <= 2:
        return n  # 1 step -> 1 way, 2 steps -> 2 ways
    two_back, one_back = 1, 2
    for _ in range(3, n + 1):
        two_back, one_back = one_back, two_back + one_back
    return one_back
// Climbing stairs - O(1) space bottom-up
//
// ways(i) = ways(i-1) + ways(i-2), a Fibonacci-style recurrence. Only
// the previous two values are ever needed, so two rolling variables
// replace the whole table and space drops from O(n) to O(1).
int climbStairs(int n) {
    if (n <= 2) return n;  // 1 step -> 1 way, 2 steps -> 2 ways
    int twoBack = 1;       // ways to reach step i - 2
    int oneBack = 2;       // ways to reach step i - 1
    for (int step = 3; step <= n; ++step) {
        const int ways = twoBack + oneBack;
        twoBack = oneBack;
        oneBack = ways;
    }
    return oneBack;
}
// Climbing stairs - O(1) space bottom-up
//
// ways(i) = ways(i-1) + ways(i-2), a Fibonacci-style recurrence. Only
// the previous two values are ever needed, so two rolling variables
// replace the whole table and space drops from O(n) to O(1).
int ClimbStairs(int n) {
    if (n <= 2) return n;  // 1 step -> 1 way, 2 steps -> 2 ways
    int twoBack = 1;       // ways to reach step i - 2
    int oneBack = 2;       // ways to reach step i - 1
    for (int step = 3; step <= n; step++) {
        int ways = twoBack + oneBack;
        twoBack = oneBack;
        oneBack = ways;
    }
    return oneBack;
}
// Climbing stairs - O(1) space bottom-up
//
// ways(i) = ways(i-1) + ways(i-2), a Fibonacci-style recurrence. Only
// the previous two values are ever needed, so two rolling variables
// replace the whole table and space drops from O(n) to O(1).
int climbStairs(int n) {
    if (n <= 2) return n;  // 1 step -> 1 way, 2 steps -> 2 ways
    int twoBack = 1;       // ways to reach step i - 2
    int oneBack = 2;       // ways to reach step i - 1
    for (int step = 3; step <= n; step++) {
        int ways = twoBack + oneBack;
        twoBack = oneBack;
        oneBack = ways;
    }
    return oneBack;
}