Commit c99e938a authored by blanchet

tuned whitespaces (including CRLF -> LF conversion)

parent 40e1a8cb60a3
(* Author: Alexander Bentkamp, Universität des Saarlandes
*)
section \<open>Concrete Matrices\<close>
theory DL_Concrete_Matrices
imports Real "../Jordan_Normal_Form/Matrix" DL_Missing_Matrix
begin
text \<open>The following definition allows non-square matrices; mat\_one n only allows square matrices.\<close>
definition eye_matrix::"nat \<Rightarrow> nat \<Rightarrow> real mat"
where "eye_matrix nr nc = mat nr nc (\<lambda>(r, c). if r=c then 1 else 0)"
lemma eye_matrix_dim: "dim\<^sub>r (eye_matrix nr nc) = nr" "dim\<^sub>c (eye_matrix nr nc) = nc" by (simp_all add: eye_matrix_def)
lemma row_eye_matrix:
assumes "i < nr"
shows "row (eye_matrix nr nc) i = unit\<^sub>v nc i"
by (rule vec_eqI, simp add: assms eye_matrix_def vec_unit_def, simp add: eye_matrix_dim(2))
lemma unit_eq_0[simp]:
assumes i: "i \<ge> n"
shows "unit\<^sub>v n i = \<zero>\<^sub>v n"
apply (rule vec_eqI)
apply (metis (mono_tags, lifting) i leD vec_dim_vec vec_index_vec vec_unit_def vec_zero_def)
by simp
lemma mult_eye_matrix:
assumes "i < nr"
shows "(eye_matrix nr (dim\<^sub>v v) \<otimes>\<^sub>m\<^sub>v v) $ i = (if i<dim\<^sub>v v then v $ i else 0)" (is "?a $ i = ?b")
proof -
have "?a $ i = row (eye_matrix nr (dim\<^sub>v v)) i \<bullet> v" using index_mat_mult_vec assms eye_matrix_dim by auto
also have "... = unit\<^sub>v (dim\<^sub>v v) i \<bullet> v" using row_eye_matrix assms by auto
also have "... = ?b" using scalar_prod_left_unit vec_elemsI unit_eq_0 scalar_prod_left_zero by fastforce
finally show ?thesis by auto
qed
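(* A small worked example (illustration only) of mult_eye_matrix: for a vector v = (a, b) of
   dimension 2,
     eye_matrix 3 2 \<otimes>\<^sub>m\<^sub>v v = (a, b, 0)   -- extra rows pad with zeros,
     eye_matrix 1 2 \<otimes>\<^sub>m\<^sub>v v = (a)         -- fewer rows truncate v. *)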
definition all1_vec::"nat \<Rightarrow> real vec"
where "all1_vec n = vec n (\<lambda>i. 1)"
definition all1_matrix::"nat \<Rightarrow> nat \<Rightarrow> real mat"
where "all1_matrix nr nc = mat nr nc (\<lambda>(r, c). 1)"
lemma all1_matrix_dim: "dim\<^sub>r (all1_matrix nr nc) = nr" "dim\<^sub>c (all1_matrix nr nc) = nc"
by (simp_all add: all1_matrix_def)
lemma row_all1_matrix:
assumes "i < nr"
shows "row (all1_matrix nr nc) i = all1_vec nc"
apply (rule vec_eqI)
apply (simp add: all1_matrix_def all1_vec_def assms)
by (simp add: all1_matrix_def all1_vec_def)
lemma all1_vec_scalar_prod:
shows "all1_vec (length xs) \<bullet> (vec_of_list xs) = sum_list xs"
proof -
have "all1_vec (length xs) \<bullet> (vec_of_list xs) = (\<Sum>i = 0..<dim\<^sub>v (vec_of_list xs). vec_of_list xs $ i)"
unfolding scalar_prod_def by (metis (no_types, lifting) all1_vec_def mult_cancel_right1 sum_ivl_cong
vec.abs_eq vec_dim_vec vec_index_vec vec_of_list.abs_eq)
also have "... = (\<Sum>i = 0..<length xs. xs ! i)" using vec.abs_eq vec_dim_vec vec_of_list.abs_eq
by (metis sum_ivl_cong vec_index_vec)
also have "... = sum_list xs" by (simp add: sum_list_sum_nth)
finally show ?thesis by auto
qed
lemma mult_all1_matrix:
assumes "i < nr"
shows "((all1_matrix nr (dim\<^sub>v v)) \<otimes>\<^sub>m\<^sub>v v) $ i = sum_list (list_of_vec v)" (is "?a $ i = sum_list (list_of_vec v)")
proof -
have "?a $ i = row (all1_matrix nr (dim\<^sub>v v)) i \<bullet> v" using index_mat_mult_vec assms all1_matrix_dim by auto
also have "... = sum_list (list_of_vec v)" unfolding row_all1_matrix[OF assms] using all1_vec_scalar_prod[of "list_of_vec v"]
by (metis vec.abs_eq vec_dim_vec vec_list vec_of_list.abs_eq)
finally show ?thesis by auto
qed
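(* A small worked example (illustration only) of mult_all1_matrix: for v = (a, b, c),
     all1_matrix 2 3 \<otimes>\<^sub>m\<^sub>v v = (a + b + c, a + b + c),
   i.e. every entry of the result equals sum_list (list_of_vec v). *)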
definition copy_first_matrix::"nat \<Rightarrow> nat \<Rightarrow> real mat"
where "copy_first_matrix nr nc = mat nr nc (\<lambda>(r, c). if c = 0 then 1 else 0)"
lemma copy_first_matrix_dim: "dim\<^sub>r (copy_first_matrix nr nc) = nr" "dim\<^sub>c (copy_first_matrix nr nc) = nc"
by (simp_all add: copy_first_matrix_def)
lemma row_copy_first_matrix:
assumes "i < nr"
shows "row (copy_first_matrix nr nc) i = unit\<^sub>v nc 0"
apply (rule vec_eqI)
apply (auto simp add: copy_first_matrix_def assms)[1]
by (simp add: copy_first_matrix_def)
lemma mult_copy_first_matrix:
assumes "i < nr" and "dim\<^sub>v v > 0"
shows "(copy_first_matrix nr (dim\<^sub>v v) \<otimes>\<^sub>m\<^sub>v v) $ i = v $ 0" (is "?a $ i = v $ 0")
proof -
have "?a $ i = row (copy_first_matrix nr (dim\<^sub>v v)) i \<bullet> v" using index_mat_mult_vec assms copy_first_matrix_dim by auto
also have "... = unit\<^sub>v (dim\<^sub>v v) 0 \<bullet> v" using row_copy_first_matrix assms by auto
also have "... = v $ 0" using assms(2) scalar_prod_left_unit vec_elems by blast
finally show ?thesis by auto
qed
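(* A small worked example (illustration only) of mult_copy_first_matrix: every row of
   copy_first_matrix nr nc is unit\<^sub>v nc 0, so for v = (a, b, c),
     copy_first_matrix 2 3 \<otimes>\<^sub>m\<^sub>v v = (a, a),
   i.e. the first entry of v is broadcast to all nr output positions. *)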
end
(* Author: Alexander Bentkamp, Universität des Saarlandes
*)
section \<open>Matrix to Vector Conversion\<close>
theory DL_Flatten_Matrix
imports Real "../Jordan_Normal_Form/Matrix"
begin
definition extract_matrix :: "(nat \<Rightarrow> 'a) \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> 'a mat" where
"extract_matrix a m n = mat m n (\<lambda>(i,j). a (i*n + j))"
definition flatten_matrix :: "'a mat \<Rightarrow> (nat \<Rightarrow> 'a)" where
"flatten_matrix A k = A $$ (k div dim\<^sub>c A, k mod dim\<^sub>c A)"
lemma two_digit_le:
fixes i j :: nat
assumes "i < m" "j < n"
shows "i*n + j < m*n"
proof -
have "(i*n + j) div n = i" "m*n div n = m" using assms by auto
then have "(i * n + j) div m div n = i div m"
by (metis Divides.div_mult2_eq mult.commute)
then show ?thesis
by (metis Divides.div_mult2_eq \<open>i < m\<close> \<open>m * n div n = m\<close> div_eq_0_iff not_less0)
qed
lemma extract_matrix_cong:
assumes "\<And>i. i < m * n \<Longrightarrow> a i = b i"
shows "extract_matrix a m n = extract_matrix b m n"
proof -
have "\<And>i j. i < m \<Longrightarrow> j < n \<Longrightarrow> a (i*n + j) = b (i*n + j)" using two_digit_le assms by blast
then show ?thesis unfolding extract_matrix_def by auto
qed
lemma extract_matrix_flatten_matrix:
"extract_matrix (flatten_matrix A) (dim\<^sub>r A) (dim\<^sub>c A) = A"
unfolding extract_matrix_def flatten_matrix_def by auto
lemma flatten_matrix_extract_matrix:
shows "\<And>k. k<m*n \<Longrightarrow> flatten_matrix (extract_matrix a m n) k = a k"
unfolding extract_matrix_def flatten_matrix_def
by (metis (no_types, lifting) Divides.div_mult2_eq case_prod_conv div_eq_0_iff mat_dim_col_mat(1)
mat_index_mat(1) div_mult_mod_eq mod_less_divisor mult.commute mult_zero_right not_gr0 not_less0)
lemma index_extract_matrix:
assumes "i<m" "j<n"
shows "extract_matrix a m n $$ (i,j) = a (i*n + j)"
unfolding extract_matrix_def using assms by simp
lemma dim_extract_matrix:
shows "dim\<^sub>r (extract_matrix as m n) = m"
and "dim\<^sub>c (extract_matrix as m n) = n"
unfolding extract_matrix_def by simp_all
end
(* Author: Alexander Bentkamp, Universität des Saarlandes
*)
section \<open>Fundamental Theorem of Network Capacity\<close>
theory DL_Fundamental_Theorem_Network_Capacity
imports DL_Rank_CP_Rank DL_Deep_Model_Poly Lebesgue_Zero_Set DL_Rank_Submatrix "~~/src/HOL/Analysis/Complete_Measure" DL_Shallow_Model
begin
context deep_model_correct_params_y
begin
theorem fundamental_theorem_network_capacity_polynomial:
obtains p where "p\<noteq>0" and "vars p \<subseteq> {..<weight_space_dim}"
and "\<And>x. insertion x p \<noteq> 0 \<Longrightarrow> r ^ N_half \<le> cprank (A x)"
proof -
assume assumption:"\<And>p. p \<noteq> 0 \<Longrightarrow> vars p \<subseteq> {..<weight_space_dim} \<Longrightarrow>
(\<And>x. insertion x p \<noteq> 0 \<Longrightarrow> r ^ N_half \<le> cprank (A x)) \<Longrightarrow> thesis"
obtain p where p_def:"\<And>x. insertion x p = Determinant.det (witness_submatrix y x)" and
vars_p:"vars p \<subseteq> {..<weight_space_dim}"
using polyfun_det_deep_model[unfolded polyfun_def] by auto
then have "insertion witness_weights p \<noteq> 0"
using witness_det[unfolded Aw'_def'] witness_submatrix_def weight_space_dim_def
by (metis p_def)
then have "p\<noteq>0" by auto
have "\<And>x. insertion x p \<noteq> 0 \<Longrightarrow> r ^ N_half \<le> cprank (A x)"
proof -
fix x assume "insertion x p \<noteq> 0"
then have "Determinant.det (witness_submatrix y x) \<noteq> 0" using p_def by auto
have 0:"weight_space_dim \<le> length (map x [0..<weight_space_dim])" by auto
have "r ^ N_half \<le> mrank (A' x)"
using vec_space.rank_gt_minor[OF mat_carrierI[OF dims_A'_pow, unfolded weight_space_dim_def]
`Determinant.det (witness_submatrix y x) \<noteq> 0`[unfolded witness_submatrix_def]]
card_rows_with_1[unfolded dims_Aw'_pow] by (metis (no_types, lifting) Collect_cong dims_A'_pow(1))
also have "... \<le> cprank (A x)" using matrix_rank_le_cp_rank A'_def by auto
finally show "r ^ N_half \<le> cprank (A x)" .
qed
then show ?thesis using assumption `p\<noteq>0` `vars p \<subseteq> {..<weight_space_dim}` by blast
qed
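(* Proof idea of the next theorem: the weight assignments x with cprank (A x) < r ^ N_half lie in
   the zero set of the nonzero polynomial p from the previous theorem, and by
   lebesgue_mpoly_zero_set the zero set of a nonzero multivariate polynomial is a Lebesgue null
   set. *)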
theorem fundamental_theorem_network_capacity:
"AE x in lborel_f weight_space_dim. r ^ N_half \<le> cprank (A x)"
proof -
obtain p where "p\<noteq>0" and "vars p \<subseteq> {..<weight_space_dim}"
and "\<And>x. insertion x p \<noteq> 0 \<Longrightarrow> r ^ N_half \<le> cprank (A x)"
using fundamental_theorem_network_capacity_polynomial by metis
have null_set:"{f \<in> space (lborel_f weight_space_dim). insertion f p = 0} \<in> null_sets (lborel_f weight_space_dim)"
using lebesgue_mpoly_zero_set[OF `p\<noteq>0` `vars p \<subseteq> {..<weight_space_dim}`] by simp
have subset:"{x \<in> space (lborel_f weight_space_dim). r ^ N_half > cprank (A x)}
\<subseteq> {f \<in> space (lborel_f weight_space_dim). insertion f p = 0}"
proof
fix x assume "x\<in>{x \<in> space (lborel_f weight_space_dim). r ^ N_half > cprank (A x)}"
then have "x\<in>space (lborel_f weight_space_dim)" "r ^ N_half > cprank (A x)" by auto
then have "insertion x p = 0" using `\<And>x. insertion x p \<noteq> 0 \<Longrightarrow> r ^ N_half \<le> cprank (A x)`
HOL.contrapos_nn not_le by metis
then show "x \<in> {f \<in> space (lborel_f weight_space_dim). insertion f p = 0}" using `x\<in>space (lborel_f weight_space_dim)` by blast
qed
then show ?thesis using AE_I'[OF null_set, of "\<lambda>x. r ^ N_half \<le> cprank (A x)"] leI by blast
qed
end
(*TODO: arbitrary parameters for shallow_model?*)
context deep_model_correct_params
begin
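(* Proof idea of the next theorem: if the tensor of the deep network equals the tensor of a
   shallow model with Z < r ^ N_half summands, then cprank (A x) < r ^ N_half by
   cprank_shallow_model, so x lies in the null set given by fundamental_theorem_network_capacity. *)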
theorem null_set_tensors_equal:
shows "AE weights_deep in lborel_f weight_space_dim.
\<not> (\<exists>weights_shallow Z. Z < r ^ N_half \<and>
tensors_from_net (insert_weights (deep_model_l rs) weights_deep)
= tensors_from_net (insert_weights (shallow_model (rs ! 0) Z (last rs) (2*N_half -1)) weights_shallow))"
(is "almost_everywhere ?M ?P")
proof -
have "0 < rs ! 0" using no_zeros deep by (metis length_0_conv length_greater_0_conv not_numeral_le_zero nth_mem)
have "\<And>x. \<not> ?P x \<Longrightarrow> cprank (deep_model_correct_params_y.A rs 0 x) < r ^ N_half"
proof -
fix x assume "\<not> ?P x"
obtain weights_shallow Z where "Z < r ^ N_half"
"tensors_from_net (insert_weights (deep_model_l rs) x) =
tensors_from_net (insert_weights (shallow_model (rs ! 0) Z (last rs) (2 * N_half - 1)) weights_shallow)"
using `\<not> ?P x` by blast
then have "cprank (tensors_from_net (insert_weights (deep_model_l rs) x) $ 0) < r ^ N_half"
using cprank_shallow_model `0 < rs ! 0` remove_insert_weights by (metis not_le order_trans)
then show "cprank (deep_model_correct_params_y.A rs 0 x) < r ^ N_half"
by (simp add: `0 < rs ! 0` deep_model_correct_params_axioms
deep_model_correct_params_y.A_def deep_model_correct_params_y.intro deep_model_correct_params_y_axioms.intro)
qed
then show ?thesis using Filter.eventually_mono using deep_model_correct_params_y.fundamental_theorem_network_capacity[OF deep_model_correct_params_y.intro,
OF deep_model_correct_params_axioms, unfolded deep_model_correct_params_y_axioms_def, OF `0 < rs ! 0`]
by fastforce
qed
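(* Proof idea of the next theorem: if the deep and the shallow network agree on all inputs of the
   correct dimensions, then they represent the same tensors (tensors_from_net_eqI), so the
   tensor-level null-set result null_set_tensors_equal applies. *)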
theorem fundamental_theorem_network_capacity_v2:
shows "AE weights_deep in lborel_f weight_space_dim.
\<not>(\<exists>weights_shallow Z. Z < r ^ N_half \<and> (\<forall>inputs. map dim\<^sub>v inputs = input_sizes (deep_model_l rs) \<longrightarrow>
evaluate_net (insert_weights (deep_model_l rs) weights_deep) inputs
= evaluate_net (insert_weights (shallow_model (rs ! 0) Z (last rs) (2*N_half-1)) weights_shallow) inputs))"
(is "almost_everywhere ?M ?P")
proof (rule Filter.eventually_mono)
show "AE weights_deep in lborel_f weight_space_dim.
\<not> (\<exists>weights_shallow Z. Z < r ^ N_half \<and>
tensors_from_net (insert_weights (deep_model_l rs) weights_deep)
= tensors_from_net (insert_weights (shallow_model (rs ! 0) Z (last rs) (2*N_half -1)) weights_shallow))"
(is "almost_everywhere ?M ?P'")
using null_set_tensors_equal by metis
have "\<And>x. \<not>?P x \<Longrightarrow> \<not>?P' x"
proof -
fix weights_deep assume "\<not>?P weights_deep"
then obtain weights_shallow Z where 0:"Z < r ^ N_half"
and eval_eq:"\<And>inputs. map dim\<^sub>v inputs = input_sizes (deep_model_l rs) \<Longrightarrow>
evaluate_net (insert_weights (deep_model_l rs) weights_deep) inputs
= evaluate_net (insert_weights (shallow_model (rs ! 0) Z (last rs) (2*N_half-1)) weights_shallow) inputs"
by blast
have "2 \<le> length rs" using deep by linarith
have "tensors_from_net (insert_weights (deep_model_l rs) weights_deep) =
tensors_from_net (insert_weights (shallow_model (rs ! 0) Z (last rs) (2 * N_half - 1)) weights_shallow)"
using tensors_from_net_eqI[OF _ _ _ eval_eq, unfolded remove_insert_weights, OF valid_deep_model valid_shallow_model]
input_sizes_deep_model[OF `2 \<le> length rs`] input_sizes_shallow_model input_sizes_remove_weights remove_insert_weights
by (metis (no_types, lifting) N_half_def Suc_diff_1 Suc_diff_Suc Suc_le_lessD deep numeral_2_eq_2 numeral_3_eq_3 power_Suc power_eq_0_iff zero_less_power zero_power2)
then show "\<not>?P' weights_deep"
using 0 by blast
qed
then show "\<And>x. ?P' x \<Longrightarrow> ?P x" by blast
qed
end
thm deep_model_correct_params.fundamental_theorem_network_capacity_v2
end
@@ -6,7 +6,7 @@ imports Main
begin
lemma card_even[simp]: "card {a \<in> Collect even. a < 2 * n} = n"
proof (induction n)
case 0
then show ?case by auto
next
@@ -14,13 +14,13 @@ next
have "{a \<in> Collect even. a < 2 * Suc n} = insert (2*n) {a \<in> Collect even. a < 2 * n}"
using le_eq_less_or_eq less_Suc_eq_le subset_antisym by force
show ?case
unfolding `{a \<in> Collect even. a < 2 * Suc n} = insert (2*n) {a \<in> Collect even. a < 2 * n}`
using Suc card_insert_disjoint[of "{a \<in> Collect even. a < 2 * n}" "2*n"]
by (simp add: finite_M_bounded_by_nat less_not_refl2)
qed
lemma card_odd[simp]: "card {a \<in> Collect odd. a < 2 * n} = n"
proof (induction n)
case 0
then show ?case by auto
next
@@ -28,8 +28,8 @@ next
have "{a \<in> Collect odd. a < 2 * Suc n} = insert (2*n+1) {a \<in> Collect odd. a < 2 * n}"
using le_eq_less_or_eq less_Suc_eq_le subset_antisym by force
show ?case
unfolding `{a \<in> Collect odd. a < 2 * Suc n} = insert (2*n+1) {a \<in> Collect odd. a < 2 * n}`
using Suc card_insert_disjoint[of "{a \<in> Collect even. a < 2 * n}" "2*n"]