show "\<And>f. extract_matrix (\<lambda>i. f (i + a)) x1 x2 \<in> carrier_mat x1 (output_size m)" unfolding `output_size m = x2` using dim_extract_matrix
using carrier_matI by (metis (no_types, lifting))
show "\<And>i j. i < x1 \<Longrightarrow> j < output_size m \<Longrightarrow> polyfun {..<a + count_weights (Conv (x1, x2) m)} (\<lambda>f. extract_matrix (\<lambda>i. f (i + a)) x1 x2 $$ (i, j))"
unfolding `output_size m = x2` count_weights.simps using polyfun_extract_matrix[of _ x1 _ x2 a "count_weights m"] by blast
show "\<And>i j. i < x1 \<Longrightarrow> j < output_size m \<Longrightarrow> polyfun {..<a + count_weights s (Conv (x1, x2) m)} (\<lambda>f. extract_matrix (\<lambda>i. f (i + a)) x1 x2 $$ (i, j))"
unfolding `output_size m = x2` count_weights.simps using polyfun_extract_matrix[of _ x1 _ x2 a "count_weights s m"] by blast
show "j < x1" using Conv.prems(3) \<open>x = (x1, x2)\<close> by auto
qed
next
case (Pool m1 m2 inputs j a)
have A2:"\<And>f. map dim_vec (take (length (input_sizes (insert_weights m1 (\<lambda>i. f (i + a))))) inputs) = input_sizes m1"
have A2:"\<And>f. map dim_vec (take (length (input_sizes (insert_weights s m1 (\<lambda>i. f (i + a))))) inputs) = input_sizes m1"
by (metis Pool.prems(1) append_eq_conv_conj input_sizes.simps(3) input_sizes_remove_weights remove_insert_weights take_map)
have B2:"\<And>f. map dim_vec (drop (length (input_sizes (insert_weights m1 (\<lambda>i. f (i + a))))) inputs) = input_sizes m2"
have B2:"\<And>f. map dim_vec (drop (length (input_sizes (insert_weights s m1 (\<lambda>i. f (i + a))))) inputs) = input_sizes m2"
using Pool.prems(1) append_eq_conv_conj input_sizes.simps(3) input_sizes_remove_weights remove_insert_weights by (metis drop_map)
have A3:"valid_net m1" and B3:"valid_net m2" using `valid_net (Pool m1 m2)` valid_net.simps by blast+
have "output_size (Pool m1 m2) = output_size m2" unfolding output_size.simps
using `valid_net (Pool m1 m2)` "valid_net.cases" by fastforce
then have A4:"j < output_size m1" and B4:"j < output_size m2" using `j < output_size (Pool m1 m2)` by simp_all
let ?net1 = "\<lambda>f. evaluate_net (insert_weights m1 (\<lambda>i. f (i + a)))
(take (length (input_sizes (insert_weights m1 (\<lambda>i. f (i + a))))) inputs)"
let ?net2 = "\<lambda>f. evaluate_net (insert_weights m2 (\<lambda>i. f (i + count_weights m1 + a)))
(drop (length (input_sizes (insert_weights m1 (\<lambda>i. f (i + a))))) inputs)"
let ?net1 = "\<lambda>f. evaluate_net (insert_weights s m1 (\<lambda>i. f (i + a)))
(take (length (input_sizes (insert_weights s m1 (\<lambda>i. f (i + a))))) inputs)"
let ?net2 = "\<lambda>f. evaluate_net (insert_weights s m2 (if s then \<lambda>i. f (i + a) else (\<lambda>i. f (i + count_weights s m1 + a))))
(drop (length (input_sizes (insert_weights s m1 (\<lambda>i. f (i + a))))) inputs)"
have length1: "\<And>f. output_size m1 = dim_vec (?net1 f)"
by (metis A2 A3 input_sizes_remove_weights output_size_correct remove_insert_weights)
then have jlength1:"\<And>f. j < dim_vec (?net1 f)" using A4 by metis
have length2: "\<And>f. output_size m2 = dim_vec (?net2 f)"
by (metis B2 B3 input_sizes_remove_weights output_size_correct remove_insert_weights)
then have jlength2:"\<And>f. j < dim_vec (?net2 f)" using B4 by metis
have cong1:"\<And>xf. (\<lambda>f. evaluate_net (insert_weights m1 (\<lambda>i. f (i + a)))
using Pool.IH(1)[OF A2 A3 A4, of a, unfolded cong1]
apply (simp add:polyfun_subset[of "{..<a + count_weights s m1}" "{..<a + (if s then max (count_weights s m1) (count_weights s m2) else count_weights s m1 + count_weights s m2)}"])
using Pool.IH(2)[OF B2 B3 B4, of "a + (if s then 0 else count_weights s m1)", unfolded cong2 semigroup_add_class.add.assoc[of a]]
by (simp add:polyfun_subset[of "{..<a + ((if s then 0 else count_weights s m1) + count_weights s m2)}" "{..<a + (if s then max (count_weights s m1) (count_weights s m2) else count_weights s m1 + count_weights s m2)}"])
fun insert_weights :: "bool \<Rightarrow> (nat \<times> nat) convnet \<Rightarrow> (nat \<Rightarrow> real) \<Rightarrow> real mat convnet" where
"insert_weights shared (Input M) w = Input M" |
"insert_weights shared (Conv (r0,r1) m) w = Conv
(extract_matrix w r0 r1)
(insert_weights shared m (\<lambda>i. w (i+r0*r1)))" |
"insert_weights shared (Pool m1 m2) w = Pool
(insert_weights shared m1 w)
(insert_weights shared m2 (if shared then w else (\<lambda>i. w (i+(count_weights shared m1)))))"
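(* Illustration, not part of the original theory: two small sanity-check lemmas that make
   the weight-sharing convention explicit. With sharing enabled both Pool branches are
   filled from the same weight function; without sharing the second branch is shifted
   past count_weights of the first. Both follow directly from the defining equations. *)
lemma "insert_weights True (Pool m1 m2) w
     = Pool (insert_weights True m1 w) (insert_weights True m2 w)"
  by simp

lemma "insert_weights False (Pool m1 m2) w
     = Pool (insert_weights False m1 w) (insert_weights False m2 (\<lambda>i. w (i + count_weights False m1)))"
  by simp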
fun remove_weights :: "real mat convnet \<Rightarrow> (nat \<times> nat) convnet" where
"remove_weights (Input M) = Input M" |
...
...
where "mat_tensorlist_mult A Ts ds
= Matrix.vec (dim_row A) (\<lambda>j. tensor_from_lookup ds (\<lambda>is. (A *\<^sub>v (map_vec (\<lambda>T. Tensor.lookup T is) Ts)) $j))"
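(* Comment added for exposition, not in the original theory: by the equation above,
   entry j of mat_tensorlist_mult A Ts ds is the tensor with dimension list ds whose
   value at an index is equals component j of the vector
   A *\<^sub>v (map_vec (\<lambda>T. Tensor.lookup T is) Ts), i.e. row j of A applied to the
   lookups of the tensors Ts at is. *)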
lemma insert_weights_cong:
assumes "(\<And>i. i<count_weights m \<Longrightarrow> w1 i = w2 i)"
shows "insert_weights m w1 = insert_weights m w2"
assumes "(\<And>i. i<count_weights s m \<Longrightarrow> w1 i = w2 i)"
shows "insert_weights s m w1 = insert_weights s m w2"
using assms proof (induction m arbitrary: w1 w2)
case Input
then show ?case by simp
next
case (Conv r01 m)
then obtain r0 r1 where "r01 = (r0,r1)" by (meson surj_pair)
have 2:"insert_weights m (\<lambda>i. w1 (i + r0 * r1)) = insert_weights m (\<lambda>i. w2 (i + r0 * r1))" using Conv
have 2:"insert_weights s m (\<lambda>i. w1 (i + r0 * r1)) = insert_weights s m (\<lambda>i. w2 (i + r0 * r1))" using Conv
using \<open>r01 = (r0, r1)\<close> add.commute add_less_cancel_right count_weights.simps(2) by fastforce
then show ?case unfolding `r01 = (r0,r1)` insert_weights.simps
by (metis Conv.prems \<open>r01 = (r0, r1)\<close> count_weights.simps(2) extract_matrix_cong trans_less_add1)
next
case (Pool m1 m2)
have 1:"insert_weights m1 w1 = insert_weights m1 w2"
using Pool(1)[of w1 w2] Pool(3)[unfolded count_weights.simps] by simp
have 2:"insert_weights m2 (\<lambda>i. w1 (i + count_weights m1)) = insert_weights m2 (\<lambda>i. w2 (i + count_weights m1))"
using Pool(2)[of "\<lambda>i. w1 (i + count_weights m1)" "\<lambda>i. w2 (i + count_weights m1)"] Pool(3)[unfolded count_weights.simps] by simp
show ?case unfolding insert_weights.simps 1 2 by metis
have 1:"insert_weights s m1 w1 = insert_weights s m1 w2"
using Pool(1)[of w1 w2] Pool(3)[unfolded count_weights.simps]
by (cases s; auto)
have shared:"s=True \<Longrightarrow> insert_weights s m2 w1 = insert_weights s m2 w2"
using Pool(2)[of w1 w2] Pool(3)[unfolded count_weights.simps] by auto
have unshared:"s=False \<Longrightarrow> insert_weights s m2 (\<lambda>i. w1 (i + count_weights s m1)) = insert_weights s m2 (\<lambda>i. w2 (i + count_weights s m1))"
using Pool(2) Pool(3) count_weights.simps by fastforce
show ?case unfolding insert_weights.simps 1 using unshared shared by simp
qed
lemma dims_mat_tensorlist_mult:
...
...
next
then show ?case using lookup_prod IH base_input_def by auto
qed
primrec extract_weights::"bool \<Rightarrow> real mat convnet \<Rightarrow> nat \<Rightarrow> real" where
extract_weights_Input: "extract_weights shared (Input M) = (\<lambda>x. 0)"
| extract_weights_Conv: "extract_weights shared (Conv A m) =
(\<lambda>x. if x < dim_row A * dim_col A then flatten_matrix A x
else extract_weights shared m (x - dim_row A * dim_col A))"
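(* Comment added for exposition: extract_weights is intended to recover a flat weight
   function from a concrete network. The Conv clause reads the first
   dim_row A * dim_col A entries from A via flatten_matrix and delegates all remaining
   indices, shifted by that offset, to the sub-network; the Input clause yields 0. *)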
...
(\<And>x. x < count_weights True (remove_weights m1) \<Longrightarrow> extract_weights True m1 x = extract_weights True m2 x)
\<Longrightarrow> shared_weight_net (Pool m1 m2)"
lemma insert_extract_weights_cong_shared:
assumes "shared_weight_net m"
assumes "\<And>x. x < count_weights True (remove_weights m) \<Longrightarrow> f x = extract_weights True m x"
shows "m = insert_weights True (remove_weights m) f"
using assms proof (induction arbitrary:f rule:shared_weight_net.induct)
case (shared_weight_net_Input M)
then show ?case
by simp
next
case (shared_weight_net_Conv m A)
have "extract_matrix f (dim_row A) (dim_col A) = A"
by (simp add: extract_matrix_cong extract_matrix_flatten_matrix shared_weight_net_Conv.prems)
then show ?case
using shared_weight_net_Conv.IH[of "(\<lambda>i. f (i + dim_row A * dim_col A))"]
using shared_weight_net_Conv.prems by auto
next
case (shared_weight_net_Pool m1 m2)
have "m1 = insert_weights True (remove_weights m1) f"
using shared_weight_net_Pool.IH(1) shared_weight_net_Pool.prems by auto
have "m2 = insert_weights True (remove_weights m2) f"
using local.shared_weight_net_Pool(3) shared_weight_net_Pool.IH(2)
shared_weight_net_Pool.hyps(4) shared_weight_net_Pool.prems by fastforce
then show ?case
using \<open>m1 = insert_weights True (remove_weights m1) f\<close> by auto
qed
lemma insert_extract_weights_cong_unshared:
assumes "\<And>x. x < count_weights False (remove_weights m) \<Longrightarrow> f x = extract_weights False m x"
shows "m = insert_weights False (remove_weights m) f"
using assms proof (induction m arbitrary:f)
case (Input M)
then show ?case
by simp
next
case (Conv A m)
then have "extract_matrix f (dim_row A) (dim_col A) = A"
by (metis count_weights.simps(2) extract_matrix_flatten_matrix_cong extract_weights_Conv remove_weights.simps(2) trans_less_add1)
then show ?case
using Conv.IH Conv.prems by auto
next
case (Pool m1 m2)
then show ?case
using Pool.IH(1) Pool.IH(2) Pool.prems by auto
qed
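(* Illustration, not part of the original theory: instantiating the unshared congruence
   above with the network's own extracted weights gives the expected inverse property.
   The one-line proof is a sketch assuming the lemma is stated exactly as above. *)
lemma "m = insert_weights False (remove_weights m) (extract_weights False m)"
  by (rule insert_extract_weights_cong_unshared) simp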
lemma remove_insert_weights:
shows "remove_weights (insert_weights m w) = m"
shows "remove_weights (insert_weights s m w) = m"
proof (induction m arbitrary:w)
case Input
then show ?case by simp
next
case (Conv r12 m)
then obtain r1 r2 where "r12 = (r1, r2)" by fastforce
then have "remove_weights (insert_weights m w) = m" using Conv.IH by blast
then have "remove_weights (insert_weights (Conv (r1,r2) m) w) = Conv (r1,r2) m"
then have "remove_weights (insert_weights s m w) = m" using Conv.IH by blast
then have "remove_weights (insert_weights s (Conv (r1,r2) m) w) = Conv (r1,r2) m"
then have "dims (tensors_from_net (insert_weights (shallow_model' Z M 0) w) $ y) = [M]"
then have "dims (tensors_from_net (insert_weights s (shallow_model' Z M 0) w) $ y) = [M]"
using dims_tensors_from_net[OF vec_setI] "0.prems"(1) output_size_correct_tensors
remove_insert_weights valid_shallow_model' by metis
then show ?case
using order1 by (metis One_nat_def eq_imp_le length_Cons list.size(3))
next
case (Suc N)
have y_le_IH:"y < dim_vec (tensors_from_net (insert_weights (shallow_model' Z M N) (\<lambda>i. w (i + (count_weights (shallow_model' Z M 0))))))"
using output_size_correct_tensors[of "insert_weights (shallow_model' Z M N) (\<lambda>i. w (i + (count_weights (shallow_model' Z M 0))))",
have y_le_IH:"y < dim_vec (tensors_from_net (insert_weights s (shallow_model' Z M N) (\<lambda>i. w (i + (count_weights s (shallow_model' Z M 0))))))"
using output_size_correct_tensors[of "insert_weights s (shallow_model' Z M N) (\<lambda>i. w (i + (count_weights s (shallow_model' Z M 0))))",
unfolded remove_insert_weights, OF valid_shallow_model']
using Suc.prems(1) output_size_shallow_model' by auto
have cprank_max1_IH:"cprank_max1 (tensors_from_net (insert_weights (shallow_model' Z M N) (\<lambda>i. w (i + (count_weights (shallow_model' Z M 0))))) $ y)"
have cprank_max1_IH:"cprank_max1 (tensors_from_net (insert_weights s (shallow_model' Z M N) (\<lambda>i. w (i + (count_weights s (shallow_model' Z M 0))))) $ y)"
using Suc.IH Suc.prems(1) output_size_shallow_model' by auto
have y_le_0:"y < dim_vec (tensors_from_net (insert_weights (shallow_model' Z M 0) w))"
have y_le_0:"y < dim_vec (tensors_from_net (insert_weights s (shallow_model' Z M 0) w))"
by (metis assms output_size_correct_tensors output_size_shallow_model' remove_insert_weights valid_shallow_model')
have cprank_max1_0:"cprank_max1 (tensors_from_net (insert_weights (shallow_model' Z M 0) w) $ y)"
have cprank_max1_0:"cprank_max1 (tensors_from_net (insert_weights s (shallow_model' Z M 0) w) $ y)"
proof -
have "input_sizes (insert_weights (shallow_model' Z M 0) w) = [M]"
have "input_sizes (insert_weights s (shallow_model' Z M 0) w) = [M]"