Library Kleene
Section Definitions.
(* Syntax of Kleene's partial recursive functions, indexed by arity.
   - [Zero] and [Successor] are the unary constant-zero and successor functions.
   - [Projection] carries a proof [k < m] and denotes selection of the k-th of
     m arguments (see Projection_correct below).
   - [Composition] applies [g : PRFunction m] to the results of the m
     functions of arity k in the vector [fs].
   - [Recursion] is primitive recursion: base case [g] of arity k, step case
     [h] of arity 2+k (recursion argument, previous value, then parameters —
     see Recursion_correct_step below).
   - [Minimization] is the mu-operator, the only source of partiality. *)
Inductive PRFunction : nat -> Set :=
| Zero : PRFunction 1
| Successor : PRFunction 1
| Projection : forall {m k:nat}, k < m -> PRFunction m
| Composition : forall {k m:nat} (g:PRFunction m) (fs:t (PRFunction k) m), PRFunction k
| Recursion : forall {k:nat} (g:PRFunction k) (h:PRFunction (2+k)), PRFunction (1+k)
| Minimization : forall {k:nat} (h:PRFunction (1+k)), PRFunction k
.
Inductive PRFunction : nat -> Set :=
| Zero : PRFunction 1
| Successor : PRFunction 1
| Projection : forall {m k:nat}, k < m -> PRFunction m
| Composition : forall {k m:nat} (g:PRFunction m) (fs:t (PRFunction k) m), PRFunction k
| Recursion : forall {k:nat} (g:PRFunction k) (h:PRFunction (2+k)), PRFunction (1+k)
| Minimization : forall {k:nat} (h:PRFunction (1+k)), PRFunction k
.
Kleene is imprecise about how e.g. composition should behave on undefined arguments:
should (Composition g fs) be defined whenever the argument values it actually uses
are defined, or should it be undefined as soon as one of the fs is undefined, even
if its value is never used? We believe the second approach to be the case (it is what
is assumed in typical proofs of Turing completeness).
(* [all_defined v] is [true] iff every entry of [v] is [Some _].  Used to make
   composition strict: per the discussion above, a composed function is
   undefined as soon as any of its argument values is undefined. *)
Fixpoint all_defined {n} (v:t (option nat) n) : bool :=
match v with
| [] => true
| (Some _) :: v' => all_defined v'
| None :: _ => false
end.
match v with
| [] => true
| (Some _) :: v' => all_defined v'
| None :: _ => false
end.
Auxiliary function for minimization: we need to specify how many computation steps we
want to allow, as all Coq functions must be total. We do a "diagonal"
search.
(* Bounded linear search for a zero of [f] in its first argument, starting at
   [init] and testing at most [steps] candidates (the fuel making the function
   total).
   - [Some n]: n is the first candidate >= init with f (n, ns) = Some 0
     (find_zero_from_Some below).
   - [None]: the fuel ran out, or [f] was undefined at some candidate before a
     zero was found — the search stops at the first undefined point rather
     than skipping it (find_zero_from_None below). *)
Fixpoint find_zero_from {k} (f:t (option nat) (1+k) -> option nat) (ns:t (option nat) k) (init:nat) (steps:nat) : option nat :=
match steps with
| O => None
| S m => match f (shiftin (Some init) ns) with
        | None => None
        | Some O => Some init
        | Some (S _) => find_zero_from f ns (S init) m
        end end.
match steps with
| O => None
| S m => match f (shiftin (Some init) ns) with
| None => None
| Some O => Some init
| Some (S _) => find_zero_from f ns (S init) m
end end.
We'd love a readable definition of evaluation, but the dependent type of f makes it
tricky to write. So... First we define the fixpoint construction over the option type, to cater for undefinedness.
This is the evaluation function we actually want to use.
Definition eval {m} (f:PRFunction m) (steps:nat) (ns:t nat m) : option nat := eval_opt f steps (map Some ns).
End Definitions.
End Definitions.
Sanity checks
Our first goal is to prove that evaluation works as expected. This requires some preliminary properties about the auxiliary functions.
Predicate all_defined works as expected.
Lemma all_defined_map_Some : forall n v, all_defined (n:=n) (map Some v) = true.
Lemma all_defined_false : forall n v, all_defined (n:=n) v = false -> exists Hi, v[@Hi] = None.
Lemma all_defined_true : forall n v, all_defined (n:=n) v = true -> forall Hi, v[@Hi] <> None.
Lemma all_defined_false' : forall n v Hi, v[@Hi] = None -> all_defined (n:=n) v = false.
Lemma all_defined_false : forall n v, all_defined (n:=n) v = false -> exists Hi, v[@Hi] = None.
Lemma all_defined_true : forall n v, all_defined (n:=n) v = true -> forall Hi, v[@Hi] <> None.
Lemma all_defined_false' : forall n v Hi, v[@Hi] = None -> all_defined (n:=n) v = false.
If find_zero_from returns a value (Some n), then we know that f(n)=0,
but also that f is positive for all values between the initial one and n.
Furthermore, the number of values we tested is bounded by the first parameter.
Lemma find_zero_from_Some : forall steps {k} f (ns:t (option nat) k) init m, find_zero_from f ns init steps = Some m ->
m < init + steps /\ f (shiftin (Some m) ns) = Some O /\ forall n, init <= n < m -> exists val, f (shiftin (Some n) ns) = Some (S val).
m < init + steps /\ f (shiftin (Some m) ns) = Some O /\ forall n, init <= n < m -> exists val, f (shiftin (Some n) ns) = Some (S val).
For convenience, we split these properties in separate lemmas.
Lemma find_zero_from_bound : forall steps {k} f (ns:t (option nat) k) init m,
find_zero_from f ns init steps = Some m -> m < init + steps.
Lemma find_zero_from_value : forall steps {k} f (ns:t (option nat) k) init m,
find_zero_from f ns init steps = Some m -> f (shiftin (Some m) ns) = Some O.
Lemma find_zero_from_middle : forall steps {k} f (ns:t (option nat) k) init m,
find_zero_from f ns init steps = Some m -> forall n, init <= n < m -> exists val, f (shiftin (Some n) ns) = Some (S val).
find_zero_from f ns init steps = Some m -> m < init + steps.
Lemma find_zero_from_value : forall steps {k} f (ns:t (option nat) k) init m,
find_zero_from f ns init steps = Some m -> f (shiftin (Some m) ns) = Some O.
Lemma find_zero_from_middle : forall steps {k} f (ns:t (option nat) k) init m,
find_zero_from f ns init steps = Some m -> forall n, init <= n < m -> exists val, f (shiftin (Some n) ns) = Some (S val).
Furthermore, if f has other zeros, they must lie outside the range we tested.
Lemma find_zero_from_min : forall steps {k} f (ns:t (option nat) k) init m, find_zero_from f ns init steps = Some m ->
forall n, f (shiftin (Some n) ns) = Some 0 -> n < init \/ m <= n.
forall n, f (shiftin (Some n) ns) = Some 0 -> n < init \/ m <= n.
The negative counterpart: if find_zero returns None, then f is either undefined at least once
in the range tested (for the given number of steps), or it is always positive.
Lemma find_zero_from_None : forall steps {k} f (ns:t (option nat) k) init,
find_zero_from f ns init steps = None ->
{ exists n, init <= n < init + steps /\ f (shiftin (Some n) ns) = None /\
forall k, init <= k < n -> exists val, f (shiftin (Some k) ns) = Some (S val)} +
{ forall n, init <= n < init + steps -> exists val, f (shiftin (Some n) ns) = Some (S val) }.
find_zero_from f ns init steps = None ->
{ exists n, init <= n < init + steps /\ f (shiftin (Some n) ns) = None /\
forall k, init <= k < n -> exists val, f (shiftin (Some k) ns) = Some (S val)} +
{ forall n, init <= n < init + steps -> exists val, f (shiftin (Some n) ns) = Some (S val) }.
Monotonicity.
Lemma find_zero_from_mon : forall k h ns y m st s s', s <= s' ->
find_zero_from (@eval_opt (1+k) h st) (map Some ns) m s = Some y ->
find_zero_from (@eval_opt (1+k) h st) (map Some ns) m s' = Some y.
find_zero_from (@eval_opt (1+k) h st) (map Some ns) m s = Some y ->
find_zero_from (@eval_opt (1+k) h st) (map Some ns) m s' = Some y.
A useful characterization result.
Lemma find_zero_from_compute : forall k h ns init m steps,
(forall x, x < init+m -> exists y, h (shiftin (Some x) ns) = (Some (S y))) ->
@find_zero_from k h ns init (m+steps) = find_zero_from h ns (m+init) steps.
End Auxiliary_Lemmas.
(forall x, x < init+m -> exists y, h (shiftin (Some x) ns) = (Some (S y))) ->
@find_zero_from k h ns init (m+steps) = find_zero_from h ns (m+init) steps.
End Auxiliary_Lemmas.
Since our definition of eval is very indirect, we now prove that it behaves as expected.
Section Sanity_Checks.
Lemma Zero_correct : forall n steps, eval Zero steps [n] = Some 0.
Lemma Successor_correct : forall n steps, eval Successor steps [n] = Some (S n).
Lemma Projection_correct : forall m k (Hkm: k<m) n steps, eval (Projection Hkm) steps n = Some (nth n (Fin.of_nat_lt Hkm)).
Lemma Composition_correct : forall k m (g:PRFunction m) (f:t (PRFunction k) m) (ns:t nat k) (ms:t nat m) steps,
(forall Hi, eval (nth f Hi) steps ns = Some (nth ms Hi)) -> eval (Composition g f) steps ns = eval g steps ms.
Lemma Recursion_correct_base : forall k (g:PRFunction k) (h:PRFunction (2+k)) (ns:t nat (1+k)) steps,
hd ns = 0 -> eval (Recursion g h) steps ns = eval g steps (tl ns).
Lemma Recursion_correct_step : forall k (g:PRFunction k) (h:PRFunction (2+k)) (ns:t nat (1+k)) steps x y,
hd ns = S x -> (eval (Recursion g h) steps (x :: tl ns)) = Some y ->
eval (Recursion g h) steps ns = eval h steps (x :: y :: tl ns).
Lemma Recursion_correct_step' : forall k (g:PRFunction k) (h:PRFunction (2+k)) (ns:t nat (1+k)) steps x,
hd ns = S x -> (eval (Recursion g h) steps (x :: tl ns)) = None ->
eval (Recursion g h) steps ns = None.
Lemma Minimization_correct : forall k (h:PRFunction (1+k)) (ns:t nat k) steps n,
eval (Minimization h) steps ns = (Some n) ->
exists s, eval h s (shiftin n ns) = (Some 0) /\ forall m, m < n -> exists n', eval h s (shiftin m ns) = (Some (S n')).
End Sanity_Checks.
Lemma Zero_correct : forall n steps, eval Zero steps [n] = Some 0.
Lemma Successor_correct : forall n steps, eval Successor steps [n] = Some (S n).
Lemma Projection_correct : forall m k (Hkm: k<m) n steps, eval (Projection Hkm) steps n = Some (nth n (Fin.of_nat_lt Hkm)).
Lemma Composition_correct : forall k m (g:PRFunction m) (f:t (PRFunction k) m) (ns:t nat k) (ms:t nat m) steps,
(forall Hi, eval (nth f Hi) steps ns = Some (nth ms Hi)) -> eval (Composition g f) steps ns = eval g steps ms.
Lemma Recursion_correct_base : forall k (g:PRFunction k) (h:PRFunction (2+k)) (ns:t nat (1+k)) steps,
hd ns = 0 -> eval (Recursion g h) steps ns = eval g steps (tl ns).
Lemma Recursion_correct_step : forall k (g:PRFunction k) (h:PRFunction (2+k)) (ns:t nat (1+k)) steps x y,
hd ns = S x -> (eval (Recursion g h) steps (x :: tl ns)) = Some y ->
eval (Recursion g h) steps ns = eval h steps (x :: y :: tl ns).
Lemma Recursion_correct_step' : forall k (g:PRFunction k) (h:PRFunction (2+k)) (ns:t nat (1+k)) steps x,
hd ns = S x -> (eval (Recursion g h) steps (x :: tl ns)) = None ->
eval (Recursion g h) steps ns = None.
Lemma Minimization_correct : forall k (h:PRFunction (1+k)) (ns:t nat k) steps n,
eval (Minimization h) steps ns = (Some n) ->
exists s, eval h s (shiftin n ns) = (Some 0) /\ forall m, m < n -> exists n', eval h s (shiftin m ns) = (Some (S n')).
End Sanity_Checks.
Tactics for dealing with proofs involving composition.
Ltac prove_composition_1 := intros;
do 2 rewrite <- nth_map';
do 2 rewrite <- nth_map_inv';
apply vector_1_equal;
auto.
Ltac prove_composition_2 := intros;
do 2 rewrite <- nth_map';
do 2 rewrite <- nth_map_inv';
apply vector_2_equal;
auto.
Ltac prove_composition_3 := intros;
do 2 rewrite <- nth_map';
do 2 rewrite <- nth_map_inv';
apply vector_3_equal;
auto.
These lemmas are used to define projections. Their names seem inconsistent, but they refer to the usual convention for naming projections.
Lemma aux11 : 0 < 1.
Lemma aux12 : 0 < 2.
Lemma aux22 : 1 < 2.
Lemma aux13 : 0 < 3.
Lemma aux23 : 1 < 3.
Lemma aux33 : 2 < 3.
Lemma aux12 : 0 < 2.
Lemma aux22 : 1 < 2.
Lemma aux13 : 0 < 3.
Lemma aux23 : 1 < 3.
Lemma aux33 : 2 < 3.
Definition PR_add := Recursion (Projection aux11) (Composition Successor [Projection aux23]).
Lemma add_correct : forall m n steps, eval PR_add steps [m; n] = Some (m + n).
Lemma add_correct : forall m n steps, eval PR_add steps [m; n] = Some (m + n).
Definition PR_mult := Recursion Zero (Composition PR_add [Projection aux33; Projection aux23]).
Lemma mult_correct : forall m n steps, eval PR_mult steps [m; n] = Some (m * n).
Lemma mult_correct : forall m n steps, eval PR_mult steps [m; n] = Some (m * n).
(* Sign: 0 on 0 and 1 on any successor (sign_correct_0 / sign_correct_S
   below).  The inner recursion has arity 2 (base Zero, step the constant 1),
   so the outer composition duplicates the single input to feed it. *)
Definition PR_sign := Composition
(Recursion Zero(Composition Successor [Composition Zero [Projection aux23]]))
[Projection aux11; Projection aux11].
Lemma sign_correct_0 : forall steps, eval PR_sign steps [0] = Some (0).
Lemma sign_correct_S : forall n steps, eval PR_sign steps [S n] = Some (1).
(Recursion Zero(Composition Successor [Composition Zero [Projection aux23]]))
[Projection aux11; Projection aux11].
Lemma sign_correct_0 : forall steps, eval PR_sign steps [0] = Some (0).
Lemma sign_correct_S : forall n steps, eval PR_sign steps [S n] = Some (1).
Definition PR_pred := Composition (Recursion Zero (Projection aux13)) [Projection aux11; Zero].
Lemma pred_correct : forall n steps, eval PR_pred steps [n] = Some (pred n).
Lemma pred_correct : forall n steps, eval PR_pred steps [n] = Some (pred n).
(* Truncated subtraction: eval PR_minus _ [n; m] = Some (n - m), which is 0
   when m > n (minus_correct below).  The inner recursion iterates PR_pred m
   times starting from n; the outer composition (aux22, aux12) swaps the two
   inputs into the order the recursion expects. *)
Definition PR_minus := Composition (Recursion (Projection aux11) (Composition PR_pred [Projection aux23]))
[Projection aux22; Projection aux12].
Lemma minus_correct : forall m n steps, eval PR_minus steps [n; m] = Some (n - m).
[Projection aux22; Projection aux12].
Lemma minus_correct : forall m n steps, eval PR_minus steps [n; m] = Some (n - m).
Definition PR_gt := Composition PR_sign [PR_minus].
Lemma gt_correct_true : forall m n steps, n > m -> eval PR_gt steps [n; m] = Some 1.
Lemma gt_correct_false : forall m n steps, n <= m -> eval PR_gt steps [n; m] = Some 0.
Lemma gt_correct_1 : forall m n steps, n > m <-> eval PR_gt steps [n; m] = Some 1.
Lemma gt_correct_0 : forall m n steps, n <= m <-> eval PR_gt steps [n; m] = Some 0.
Lemma gt_correct_true : forall m n steps, n > m -> eval PR_gt steps [n; m] = Some 1.
Lemma gt_correct_false : forall m n steps, n <= m -> eval PR_gt steps [n; m] = Some 0.
Lemma gt_correct_1 : forall m n steps, n > m <-> eval PR_gt steps [n; m] = Some 1.
Lemma gt_correct_0 : forall m n steps, n <= m <-> eval PR_gt steps [n; m] = Some 0.
Definition PR_le := Composition PR_minus [Composition Successor [Composition Zero [Projection aux12]]; PR_gt].
Lemma le_correct_true : forall m n steps, m <= n -> eval PR_le steps [m; n] = Some 1.
Lemma le_correct_false : forall m n steps, n < m -> eval PR_le steps [m; n] = Some 0.
Lemma le_correct_1 : forall m n steps, m <= n <-> eval PR_le steps [m; n] = Some 1.
Lemma le_correct_0 : forall m n steps, n < m <-> eval PR_le steps [m; n] = Some 0.
Lemma le_correct_true : forall m n steps, m <= n -> eval PR_le steps [m; n] = Some 1.
Lemma le_correct_false : forall m n steps, n < m -> eval PR_le steps [m; n] = Some 0.
Lemma le_correct_1 : forall m n steps, m <= n <-> eval PR_le steps [m; n] = Some 1.
Lemma le_correct_0 : forall m n steps, n < m <-> eval PR_le steps [m; n] = Some 0.
Definition PR_equal := Composition PR_mult [PR_le; Composition PR_le [Projection aux22; Projection aux12]].
Lemma equal_correct_true : forall m n steps, m = n -> eval PR_equal steps [m; n] = Some 1.
Lemma equal_correct_false : forall m n steps, m <> n -> eval PR_equal steps [m; n] = Some 0.
Lemma equal_correct_1 : forall m n steps, m = n <-> eval PR_equal steps [m; n] = Some 1.
Lemma equal_correct_0 : forall m n steps, m <> n <-> eval PR_equal steps [m; n] = Some 0.
Lemma equal_correct_true : forall m n steps, m = n -> eval PR_equal steps [m; n] = Some 1.
Lemma equal_correct_false : forall m n steps, m <> n -> eval PR_equal steps [m; n] = Some 0.
Lemma equal_correct_1 : forall m n steps, m = n <-> eval PR_equal steps [m; n] = Some 1.
Lemma equal_correct_0 : forall m n steps, m <> n <-> eval PR_equal steps [m; n] = Some 0.
Definition PR_diff := Composition PR_add [PR_gt; Composition PR_gt [Projection aux22; Projection aux12]].
Lemma diff_correct_true : forall m n steps, m <> n -> eval PR_diff steps [m; n] = Some 1.
Lemma diff_correct_false : forall m n steps, m = n -> eval PR_diff steps [m; n] = Some 0.
Lemma diff_correct_1 : forall m n steps, m <> n <-> eval PR_diff steps [m; n] = Some 1.
Lemma diff_correct_0 : forall m n steps, m = n <-> eval PR_diff steps [m; n] = Some 0.
Lemma diff_correct_true : forall m n steps, m <> n -> eval PR_diff steps [m; n] = Some 1.
Lemma diff_correct_false : forall m n steps, m = n -> eval PR_diff steps [m; n] = Some 0.
Lemma diff_correct_1 : forall m n steps, m <> n <-> eval PR_diff steps [m; n] = Some 1.
Lemma diff_correct_0 : forall m n steps, m = n <-> eval PR_diff steps [m; n] = Some 0.
(* Exact (partial) subtraction.  PR_sub_aux [m; n; k] is Some 0 exactly when
   m = n + k (sub_aux_correct below), so minimizing over the last argument
   yields m - n when n <= m; when n > m no value is ever returned
   (sub_correct_1 / sub_correct_2 below). *)
Definition PR_sub_aux := Composition PR_diff [Composition PR_add [Projection aux23; Projection aux33]; Projection aux13].
Definition PR_sub := Minimization PR_sub_aux.
Definition PR_sub := Minimization PR_sub_aux.
By the way, there is a typo in the definition of sub in the paper...
Lemma sub_aux_correct : forall m n k steps, eval PR_sub_aux steps [m; n; k] = Some 0 <-> m = n + k.
Lemma sub_correct_1 : forall m n steps k, eval PR_sub steps [m; n] = Some k -> k = m - n.
Lemma sub_correct_2 : forall m n steps k, eval PR_sub steps [m; n] = Some k -> n <= m.
End Examples.
Section Evaluation.
Lemma eval_opt_on_None : forall m (f:PRFunction m) steps ns Hi, ns[@Hi] = None -> eval_opt f steps ns = None.
We first prove the induction schema for partial recursive functions, which requires
induction on the depth of the construction of the function.
(* Structural depth of a PRFunction term: 0 for the three base constructors,
   one more than the maximal depth of the subterms otherwise ([vmax] takes the
   maximum over a vector).  Drives the induction schema below, since plain
   structural induction does not reach the functions inside the vector of a
   Composition. *)
Fixpoint depth {m} (f:PRFunction m) : nat :=
match f with
| Zero => 0
| Successor => 0
| Projection _ => 0
| Composition g fs => 1 + Nat.max (depth g) (vmax (map depth fs))
| Recursion g h => 1 + Nat.max (depth g) (depth h)
| Minimization h => 1 + depth h
end.
Theorem PRFunction_induction : forall (P:forall (n:nat) (f:PRFunction n), Prop),
P _ Zero -> P _ Successor ->
(forall i j (Hp:i<j), P _ (Projection Hp)) ->
(forall m k g fs, (forall H, P m fs[@H]) -> P k g -> P _ (Composition g fs)) ->
(forall k g h, P _ g -> P _ h -> P (1+k) (Recursion g h)) ->
(forall k h, P _ h -> P k (Minimization h)) ->
forall n f, P n f.
Definition PRFunction_recursion : forall (F:forall (n:nat) (f:PRFunction n), Type),
F _ Zero -> F _ Successor ->
(forall i j (Hp:i<j), F _ (Projection Hp)) ->
(forall m k g fs, (forall H, F m fs[@H]) -> F k g -> F _ (Composition g fs)) ->
(forall k g h, F _ g -> F _ h -> F (1+k) (Recursion g h)) ->
(forall k h, F _ h -> F k (Minimization h)) ->
forall n f, F n f.
match f with
| Zero => 0
| Successor => 0
| Projection _ => 0
| Composition g fs => 1 + Nat.max (depth g) (vmax (map depth fs))
| Recursion g h => 1 + Nat.max (depth g) (depth h)
| Minimization h => 1 + depth h
end.
Theorem PRFunction_induction : forall (P:forall (n:nat) (f:PRFunction n), Prop),
P _ Zero -> P _ Successor ->
(forall i j (Hp:i<j), P _ (Projection Hp)) ->
(forall m k g fs, (forall H, P m fs[@H]) -> P k g -> P _ (Composition g fs)) ->
(forall k g h, P _ g -> P _ h -> P (1+k) (Recursion g h)) ->
(forall k h, P _ h -> P k (Minimization h)) ->
forall n f, P n f.
Definition PRFunction_recursion : forall (F:forall (n:nat) (f:PRFunction n), Type),
F _ Zero -> F _ Successor ->
(forall i j (Hp:i<j), F _ (Projection Hp)) ->
(forall m k g fs, (forall H, F m fs[@H]) -> F k g -> F _ (Composition g fs)) ->
(forall k g h, F _ g -> F _ h -> F (1+k) (Recursion g h)) ->
(forall k h, F _ h -> F k (Minimization h)) ->
forall n f, F n f.
Properties:
- evaluation is injective (it cannot return two different values)
- if evaluation returns a value, this is preserved when the maximum number of steps is increased
- if evaluation does not return a value (yet), then neither does it for less steps.
Lemma eval_opt_inj : forall n (f:PRFunction n) s s' ns m m',
eval_opt f s ns = Some m -> eval_opt f s' ns = Some m' -> m = m'.
Lemma eval_opt_mon : forall m (f : PRFunction m) s ns k,
eval_opt f s ns = Some k -> forall s', s' >= s -> eval_opt f s' ns = Some k.
Lemma eval_opt_mon' : forall m (f:PRFunction m) s ns,
eval_opt f s ns = None -> forall s', s' <= s -> eval_opt f s' ns = None.
Lemma eval_mon : forall m (f:PRFunction m) steps ns k, eval f steps ns = (Some k) ->
forall s', s' >= steps -> eval f s' ns = (Some k).
Lemma eval_inj_Some : forall m (f:PRFunction m) s s' ns m m', eval f s ns = Some m -> eval f s' ns = Some m' -> m = m'.
Lemma eval_inj_None : forall m (f:PRFunction m) s ns, eval f s ns = None -> forall s', s'<s -> eval f s' ns = None.
End Evaluation.
eval_opt f s ns = Some m -> eval_opt f s' ns = Some m' -> m = m'.
Lemma eval_opt_mon : forall m (f : PRFunction m) s ns k,
eval_opt f s ns = Some k -> forall s', s' >= s -> eval_opt f s' ns = Some k.
Lemma eval_opt_mon' : forall m (f:PRFunction m) s ns,
eval_opt f s ns = None -> forall s', s' <= s -> eval_opt f s' ns = None.
Lemma eval_mon : forall m (f:PRFunction m) steps ns k, eval f steps ns = (Some k) ->
forall s', s' >= steps -> eval f s' ns = (Some k).
Lemma eval_inj_Some : forall m (f:PRFunction m) s s' ns m m', eval f s ns = Some m -> eval f s' ns = Some m' -> m = m'.
Lemma eval_inj_None : forall m (f:PRFunction m) s ns, eval f s ns = None -> forall s', s'<s -> eval f s' ns = None.
End Evaluation.
Section Convergence.
(* [converges f ns y]: some fuel bound makes evaluation return [Some y].
   [diverges f ns]: no fuel bound ever produces a value.  By eval_mon above,
   convergence is stable under increasing the fuel. *)
Definition converges {k} (f:PRFunction k) ns y := exists steps, eval f steps ns = Some y.
Definition diverges {k} (f:PRFunction k) ns := forall steps, eval f steps ns = None.
Lemma converges_inj : forall {k} f ns y y', converges (k:=k) f ns y -> converges f ns y' -> y = y'.
Lemma converges_diverges : forall {k} f ns, (diverges (k:=k) f ns <-> forall y, ~converges f ns y).
Definition converges {k} (f:PRFunction k) ns y := exists steps, eval f steps ns = Some y.
Definition diverges {k} (f:PRFunction k) ns := forall steps, eval f steps ns = None.
Lemma converges_inj : forall {k} f ns y y', converges (k:=k) f ns y -> converges f ns y' -> y = y'.
Lemma converges_diverges : forall {k} f ns, (diverges (k:=k) f ns <-> forall y, ~converges f ns y).
Results for recursively proving convergence.
Lemma Composition_converges : forall m k g fs ns ms y,
(forall H, converges fs[@H] ns ms[@H]) -> converges g ms y ->
converges (@Composition m k g fs) ns y.
Lemma Recursion_converges_base : forall k g h ns y,
converges g (tl ns) y -> converges (@Recursion k g h) (0::tl ns) y.
Lemma Recursion_converges_step : forall k g h ns x y z,
converges (@Recursion k g h) (x::ns) y ->
converges h (x::y::ns) z -> converges (Recursion g h) (S x::ns) z.
Lemma converges_max : forall k h ns y,
(forall x, x<y -> exists z, converges h (shiftin x ns) (S z)) ->
exists s, forall x, x<y -> exists z, @Kleene.eval (S k) h s (shiftin x ns) = Some (S z).
Lemma Minimization_converges : forall k h ns y,
(forall x, x<y -> exists z, converges h (shiftin x ns) (S z)) -> converges h (shiftin y ns) 0 ->
converges (@Minimization k h) ns y.
(forall H, converges fs[@H] ns ms[@H]) -> converges g ms y ->
converges (@Composition m k g fs) ns y.
Lemma Recursion_converges_base : forall k g h ns y,
converges g (tl ns) y -> converges (@Recursion k g h) (0::tl ns) y.
Lemma Recursion_converges_step : forall k g h ns x y z,
converges (@Recursion k g h) (x::ns) y ->
converges h (x::y::ns) z -> converges (Recursion g h) (S x::ns) z.
Lemma converges_max : forall k h ns y,
(forall x, x<y -> exists z, converges h (shiftin x ns) (S z)) ->
exists s, forall x, x<y -> exists z, @Kleene.eval (S k) h s (shiftin x ns) = Some (S z).
Lemma Minimization_converges : forall k h ns y,
(forall x, x<y -> exists z, converges h (shiftin x ns) (S z)) -> converges h (shiftin y ns) 0 ->
converges (@Minimization k h) ns y.
Inversion results about convergence using each constructor.
Lemma converges_Zero : forall ns y, converges Zero ns y -> y = 0.
Lemma converges_Successor : forall ns y, converges Successor ns y -> y = S (hd ns).
Lemma converges_Projection : forall m k (H:m<k) ns y,
converges (Projection H) ns y -> y = ns[@Fin.of_nat_lt H].
Lemma converges_Composition' : forall {m k} fs g ns y,
converges (@Composition m k g fs) ns y ->
exists ms, (forall H, converges fs[@H] ns ms[@H]).
Lemma converges_Composition : forall {m k} fs g ns y,
converges (@Composition m k g fs) ns y ->
exists ms, (forall H, converges fs[@H] ns ms[@H]) /\ converges g ms y.
Lemma converges_Recursion_base : forall {m} (g:PRFunction m) h ns y,
converges (Recursion g h) ns y -> hd ns = 0 -> converges g (tl ns) y.
Lemma converges_Recursion_step : forall {m} (g:PRFunction m) h ns x y,
converges (Recursion g h) ns y -> hd ns = (S x) ->
exists z, converges (Recursion g h) (x :: tl ns) z /\ converges h (x :: z :: tl ns) y.
Lemma converges_Recursion_full : forall {m} (g:PRFunction m) h ns y,
converges (Recursion g h) ns y ->
forall x, x <= hd ns -> exists z, converges (Recursion g h) (x :: tl ns) z.
Lemma converges_Minimization: forall {m} (h:PRFunction (1+m)) ns y,
converges (Minimization h) ns y -> converges h (shiftin y ns) 0.
Lemma converges_Minimization_mon: forall {m} (h:PRFunction (1+m)) ns y,
converges (Minimization h) ns y ->
forall x, x < y -> exists z, converges h (shiftin x ns) (S z).
End Convergence.
Section Divergence.
Lemma converges_Successor : forall ns y, converges Successor ns y -> y = S (hd ns).
Lemma converges_Projection : forall m k (H:m<k) ns y,
converges (Projection H) ns y -> y = ns[@Fin.of_nat_lt H].
Lemma converges_Composition' : forall {m k} fs g ns y,
converges (@Composition m k g fs) ns y ->
exists ms, (forall H, converges fs[@H] ns ms[@H]).
Lemma converges_Composition : forall {m k} fs g ns y,
converges (@Composition m k g fs) ns y ->
exists ms, (forall H, converges fs[@H] ns ms[@H]) /\ converges g ms y.
Lemma converges_Recursion_base : forall {m} (g:PRFunction m) h ns y,
converges (Recursion g h) ns y -> hd ns = 0 -> converges g (tl ns) y.
Lemma converges_Recursion_step : forall {m} (g:PRFunction m) h ns x y,
converges (Recursion g h) ns y -> hd ns = (S x) ->
exists z, converges (Recursion g h) (x :: tl ns) z /\ converges h (x :: z :: tl ns) y.
Lemma converges_Recursion_full : forall {m} (g:PRFunction m) h ns y,
converges (Recursion g h) ns y ->
forall x, x <= hd ns -> exists z, converges (Recursion g h) (x :: tl ns) z.
Lemma converges_Minimization: forall {m} (h:PRFunction (1+m)) ns y,
converges (Minimization h) ns y -> converges h (shiftin y ns) 0.
Lemma converges_Minimization_mon: forall {m} (h:PRFunction (1+m)) ns y,
converges (Minimization h) ns y ->
forall x, x < y -> exists z, converges h (shiftin x ns) (S z).
End Convergence.
Section Divergence.
Lemmas about divergence - currently unused.
Lemma diverges_Composition_arg : forall {m k} fs g ns H,
diverges fs[@H] ns -> diverges (@Composition m k g fs) ns.
Lemma diverges_Composition_fun : forall {m k} fs g ns x,
(forall H, converges fs[@H] ns x[@H]) ->
diverges g x -> diverges (@Composition m k g fs) ns.
Lemma diverges_Recursion_ind : forall {m} (g:PRFunction m) h x ns,
diverges (Recursion g h) (x::ns) ->
forall y, x<y -> diverges (Recursion g h) (y::ns).
Lemma diverges_Recursion_base : forall {m} (g:PRFunction m) h ns,
diverges g (tl ns) -> diverges (Recursion g h) ns.
Lemma diverges_Recursion_step : forall {m} (g:PRFunction m) h x y ns,
converges (Recursion g h) (x::ns) y -> diverges h (x::y::ns)
-> forall z, x<z -> diverges (Recursion g h) (z::ns).
Lemma diverges_Minimization : forall {m} (h:PRFunction (1+m)) ns x,
(forall y, y < x -> exists z, converges h (shiftin y ns) (S z)) ->
diverges h (shiftin x ns) -> diverges (Minimization h) ns.
End Divergence.